Diffstat (limited to 'fs/xfs/xfs_aops.c')
-rw-r--r--	fs/xfs/xfs_aops.c	1291
1 file changed, 535 insertions, 756 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 338b9d9984e0..56a544638491 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * Copyright (c) 2016-2018 Christoph Hellwig.
+ * Copyright (c) 2016-2025 Christoph Hellwig.
  * All Rights Reserved.
  */
 #include "xfs.h"
@@ -12,162 +12,55 @@
 #include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_trans.h"
-#include "xfs_inode_item.h"
-#include "xfs_alloc.h"
-#include "xfs_error.h"
 #include "xfs_iomap.h"
 #include "xfs_trace.h"
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
-#include "xfs_bmap_btree.h"
 #include "xfs_reflink.h"
-#include <linux/writeback.h>
+#include "xfs_errortag.h"
+#include "xfs_error.h"
+#include "xfs_icache.h"
+#include "xfs_zone_alloc.h"
+#include "xfs_rtgroup.h"
 
-/*
- * structure owned by writepages passed to individual writepage calls
- */
 struct xfs_writepage_ctx {
-	struct xfs_bmbt_irec	imap;
-	unsigned int		io_type;
+	struct iomap_writepage_ctx ctx;
+	unsigned int		data_seq;
 	unsigned int		cow_seq;
-	struct xfs_ioend	*ioend;
 };
 
-struct block_device *
-xfs_find_bdev_for_inode(
-	struct inode		*inode)
+static inline struct xfs_writepage_ctx *
+XFS_WPC(struct iomap_writepage_ctx *ctx)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-
-	if (XFS_IS_REALTIME_INODE(ip))
-		return mp->m_rtdev_targp->bt_bdev;
-	else
-		return mp->m_ddev_targp->bt_bdev;
-}
-
-struct dax_device *
-xfs_find_daxdev_for_inode(
-	struct inode		*inode)
-{
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-
-	if (XFS_IS_REALTIME_INODE(ip))
-		return mp->m_rtdev_targp->bt_daxdev;
-	else
-		return mp->m_ddev_targp->bt_daxdev;
-}
-
-static void
-xfs_finish_page_writeback(
-	struct inode		*inode,
-	struct bio_vec		*bvec,
-	int			error)
-{
-	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);
-
-	if (error) {
-		SetPageError(bvec->bv_page);
-		mapping_set_error(inode->i_mapping, -EIO);
-	}
-
-	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
-	ASSERT(!iop || atomic_read(&iop->write_count) > 0);
-
-	if (!iop || atomic_dec_and_test(&iop->write_count))
-		end_page_writeback(bvec->bv_page);
-}
-
-/*
- * We're now finished for good with this ioend structure.  Update the page
- * state, release holds on bios, and finally free up memory.  Do not use the
- * ioend after this.
- */
-STATIC void
-xfs_destroy_ioend(
-	struct xfs_ioend	*ioend,
-	int			error)
-{
-	struct inode		*inode = ioend->io_inode;
-	struct bio		*bio = &ioend->io_inline_bio;
-	struct bio		*last = ioend->io_bio, *next;
-	u64			start = bio->bi_iter.bi_sector;
-	bool			quiet = bio_flagged(bio, BIO_QUIET);
-
-	for (bio = &ioend->io_inline_bio; bio; bio = next) {
-		struct bio_vec	*bvec;
-		int		i;
-
-		/*
-		 * For the last bio, bi_private points to the ioend, so we
-		 * need to explicitly end the iteration here.
-		 */
-		if (bio == last)
-			next = NULL;
-		else
-			next = bio->bi_private;
-
-		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bvec, bio, i)
-			xfs_finish_page_writeback(inode, bvec, error);
-		bio_put(bio);
-	}
-
-	if (unlikely(error && !quiet)) {
-		xfs_err_ratelimited(XFS_I(inode)->i_mount,
-			"writeback error on sector %llu", start);
-	}
+	return container_of(ctx, struct xfs_writepage_ctx, ctx);
 }
 
 /*
  * Fast and loose check if this write could update the on-disk inode size.
 */
-static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
+static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
 {
 	return ioend->io_offset + ioend->io_size >
-		XFS_I(ioend->io_inode)->i_d.di_size;
-}
-
-STATIC int
-xfs_setfilesize_trans_alloc(
-	struct xfs_ioend	*ioend)
-{
-	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
-	struct xfs_trans	*tp;
-	int			error;
-
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
-				XFS_TRANS_NOFS, &tp);
-	if (error)
-		return error;
-
-	ioend->io_append_trans = tp;
-
-	/*
-	 * We may pass freeze protection with a transaction.  So tell lockdep
-	 * we released it.
-	 */
-	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
-	/*
-	 * We hand off the transaction to the completion thread now, so
-	 * clear the flag here.
-	 */
-	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
-	return 0;
+		XFS_I(ioend->io_inode)->i_disk_size;
 }
 
 /*
  * Update on-disk file size now that data has been written to disk.
  */
-STATIC int
-__xfs_setfilesize(
+int
+xfs_setfilesize(
 	struct xfs_inode	*ip,
-	struct xfs_trans	*tp,
 	xfs_off_t		offset,
 	size_t			size)
 {
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_trans	*tp;
 	xfs_fsize_t		isize;
+	int			error;
+
+	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+	if (error)
+		return error;
 
 	xfs_ilock(ip, XFS_ILOCK_EXCL);
 	isize = xfs_new_eof(ip, offset + size);
@@ -179,157 +72,273 @@ __xfs_setfilesize(
 
 	trace_xfs_setfilesize(ip, offset, size);
 
-	ip->i_d.di_size = isize;
+	ip->i_disk_size = isize;
 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 	return xfs_trans_commit(tp);
 }
 
-int
-xfs_setfilesize(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	size_t			size)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	struct xfs_trans	*tp;
-	int			error;
-
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
-	if (error)
-		return error;
-
-	return __xfs_setfilesize(ip, tp, offset, size);
-}
-
-STATIC int
-xfs_setfilesize_ioend(
-	struct xfs_ioend	*ioend,
-	int			error)
+static void
+xfs_ioend_put_open_zones(
+	struct iomap_ioend	*ioend)
 {
-	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
-	struct xfs_trans	*tp = ioend->io_append_trans;
+	struct iomap_ioend	*tmp;
 
 	/*
-	 * The transaction may have been allocated in the I/O submission thread,
-	 * thus we need to mark ourselves as being in a transaction manually.
-	 * Similarly for freeze protection.
+	 * Put the open zone for all ioends merged into this one (if any).
 	 */
-	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
-	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
+	list_for_each_entry(tmp, &ioend->io_list, io_list)
+		xfs_open_zone_put(tmp->io_private);
 
-	/* we abort the update if there was an IO error */
-	if (error) {
-		xfs_trans_cancel(tp);
-		return error;
-	}
-
-	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
+	/*
+	 * The main ioend might not have an open zone if the submission failed
+	 * before xfs_zone_alloc_and_submit got called.
+	 */
+	if (ioend->io_private)
+		xfs_open_zone_put(ioend->io_private);
 }
 
 /*
  * IO write completion.
 */
 STATIC void
-xfs_end_io(
-	struct work_struct *work)
+xfs_end_ioend(
+	struct iomap_ioend	*ioend)
 {
-	struct xfs_ioend	*ioend =
-		container_of(work, struct xfs_ioend, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	bool			is_zoned = xfs_is_zoned_inode(ip);
 	xfs_off_t		offset = ioend->io_offset;
 	size_t			size = ioend->io_size;
+	unsigned int		nofs_flag;
 	int			error;
 
 	/*
-	 * Just clean up the in-memory strutures if the fs has been shut down.
+	 * We can allocate memory here while doing writeback on behalf of
+	 * memory reclaim.  To avoid memory allocation deadlocks set the
+	 * task-wide nofs context for the following operations.
+	 */
+	nofs_flag = memalloc_nofs_save();
+
+	/*
+	 * Just clean up the in-memory structures if the fs has been shut down.
 	 */
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+	if (xfs_is_shutdown(mp)) {
 		error = -EIO;
 		goto done;
 	}
 
 	/*
-	 * Clean up any COW blocks on an I/O error.
+	 * Clean up all COW blocks and underlying data fork delalloc blocks on
+	 * I/O error.  The delalloc punch is required because this ioend was
+	 * mapped to blocks in the COW fork and the associated pages are no
+	 * longer dirty.  If we don't remove delalloc blocks here, they become
+	 * stale and can corrupt free space accounting on unmount.
 	 */
-	error = blk_status_to_errno(ioend->io_bio->bi_status);
+	error = blk_status_to_errno(ioend->io_bio.bi_status);
 	if (unlikely(error)) {
-		switch (ioend->io_type) {
-		case XFS_IO_COW:
+		if (ioend->io_flags & IOMAP_IOEND_SHARED) {
+			ASSERT(!is_zoned);
 			xfs_reflink_cancel_cow_range(ip, offset, size, true);
-			break;
+			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, offset,
+					offset + size, NULL);
 		}
-
 		goto done;
 	}
 
 	/*
 	 * Success: commit the COW or unwritten blocks if needed.
 	 */
-	switch (ioend->io_type) {
-	case XFS_IO_COW:
+	if (is_zoned)
+		error = xfs_zoned_end_io(ip, offset, size, ioend->io_sector,
+				ioend->io_private, NULLFSBLOCK);
+	else if (ioend->io_flags & IOMAP_IOEND_SHARED)
 		error = xfs_reflink_end_cow(ip, offset, size);
-		break;
-	case XFS_IO_UNWRITTEN:
-		/* writeback should never update isize */
+	else if (ioend->io_flags & IOMAP_IOEND_UNWRITTEN)
 		error = xfs_iomap_write_unwritten(ip, offset, size, false);
-		break;
-	default:
-		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
-		break;
-	}
 
+	if (!error &&
+	    !(ioend->io_flags & IOMAP_IOEND_DIRECT) &&
+	    xfs_ioend_is_append(ioend))
+		error = xfs_setfilesize(ip, offset, size);
 done:
-	if (ioend->io_append_trans)
-		error = xfs_setfilesize_ioend(ioend, error);
-	xfs_destroy_ioend(ioend, error);
+	if (is_zoned)
+		xfs_ioend_put_open_zones(ioend);
+	iomap_finish_ioends(ioend, error);
+	memalloc_nofs_restore(nofs_flag);
 }
 
-STATIC void
+/*
+ * Finish all pending IO completions that require transactional modifications.
+ *
+ * We try to merge physically and logically contiguous ioends before completion
+ * to minimise the number of transactions we need to perform during IO
+ * completion.  Both unwritten extent conversion and COW remapping need to
+ * iterate and modify one physical extent at a time, so we gain nothing by
+ * merging physically discontiguous extents here.
+ *
+ * The ioend chain length that we can be processing here is largely unbound in
+ * length and we may have to perform significant amounts of work on each ioend
+ * to complete it.  Hence we have to be careful about holding the CPU for too
+ * long in this loop.
+ */
+void
+xfs_end_io(
+	struct work_struct	*work)
+{
+	struct xfs_inode	*ip =
+		container_of(work, struct xfs_inode, i_ioend_work);
+	struct iomap_ioend	*ioend;
+	struct list_head	tmp;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&ip->i_ioend_lock, flags);
+	list_replace_init(&ip->i_ioend_list, &tmp);
+	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
+
+	iomap_sort_ioends(&tmp);
+	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
+			io_list))) {
+		list_del_init(&ioend->io_list);
+		iomap_ioend_try_merge(ioend, &tmp);
+		xfs_end_ioend(ioend);
+		cond_resched();
+	}
+}
+
+void
 xfs_end_bio(
 	struct bio		*bio)
 {
-	struct xfs_ioend	*ioend = bio->bi_private;
-	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
+	struct iomap_ioend	*ioend = iomap_ioend_from_bio(bio);
+	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	unsigned long		flags;
 
-	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
-		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
-	else if (ioend->io_append_trans)
-		queue_work(mp->m_data_workqueue, &ioend->io_work);
-	else
-		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
+	/*
+	 * For appends, record the actually written block number and set the
+	 * boundary flag if needed.
+	 */
+	if (IS_ENABLED(CONFIG_XFS_RT) && bio_is_zone_append(bio)) {
+		ioend->io_sector = bio->bi_iter.bi_sector;
+		xfs_mark_rtg_boundary(ioend);
+	}
+
+	spin_lock_irqsave(&ip->i_ioend_lock, flags);
+	if (list_empty(&ip->i_ioend_list))
+		WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
+					 &ip->i_ioend_work));
+	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
+	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
 }
 
-STATIC int
+/*
+ * We cannot cancel the ioend directly on error.  We may have already set other
+ * pages under writeback and hence we have to run I/O completion to mark the
+ * error state of the pages under writeback appropriately.
+ *
+ * If the folio has delalloc blocks on it, the caller is asking us to punch them
+ * out.  If we don't, we can leave a stale delalloc mapping covered by a clean
+ * page that needs to be dirtied again before the delalloc mapping can be
+ * converted.  This stale delalloc mapping can trip up a later direct I/O read
+ * operation on the same region.
+ *
+ * We prevent this by truncating away the delalloc regions on the folio.
+ * Because they are delalloc, we can do this without needing a transaction.
+ * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
+ * without a transaction as there is no space left for block reservation
+ * (typically why we see an ENOSPC in writeback).
+ */
+static void
+xfs_discard_folio(
+	struct folio		*folio,
+	loff_t			pos)
+{
+	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
+	struct xfs_mount	*mp = ip->i_mount;
+
+	if (xfs_is_shutdown(mp))
+		return;
+
+	xfs_alert_ratelimited(mp,
+		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
+			folio, ip->i_ino, pos);
+
+	/*
+	 * The end of the punch range is always the offset of the first byte
+	 * of the next folio.  Hence the end offset is only dependent on the
+	 * folio itself and not the start offset that is passed in.
+	 */
+	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
+			folio_next_pos(folio), NULL);
+}
+
+/*
+ * Fast revalidation of the cached writeback mapping.  Return true if the
+ * current mapping is valid, false otherwise.
+ */
+static bool
+xfs_imap_valid(
+	struct iomap_writepage_ctx *wpc,
+	struct xfs_inode	*ip,
+	loff_t			offset)
+{
+	if (offset < wpc->iomap.offset ||
+	    offset >= wpc->iomap.offset + wpc->iomap.length)
+		return false;
+	/*
+	 * If this is a COW mapping, it is sufficient to check that the mapping
+	 * covers the offset.  Be careful to check this first because the
+	 * caller can revalidate a COW mapping without updating the data seqno.
+	 */
+	if (wpc->iomap.flags & IOMAP_F_SHARED)
+		return true;
+
+	/*
+	 * This is not a COW mapping.  Check the sequence number of the data
+	 * fork because concurrent changes could have invalidated the extent.
+	 * Check the COW fork because concurrent changes since the last time
+	 * we checked (and found nothing at this offset) could have added
+	 * overlapping blocks.
+	 */
+	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
+		trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
+				XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
+		return false;
+	}
+	if (xfs_inode_has_cow_data(ip) &&
+	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
+		trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
+				XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
+		return false;
+	}
+	return true;
+}
+
+static int
 xfs_map_blocks(
-	struct xfs_writepage_ctx *wpc,
-	struct inode		*inode,
-	loff_t			offset)
+	struct iomap_writepage_ctx *wpc,
+	loff_t			offset,
+	unsigned int		len)
 {
-	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_inode	*ip = XFS_I(wpc->inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			count = i_blocksize(inode);
-	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset), end_fsb;
-	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
+	ssize_t			count = i_blocksize(wpc->inode);
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
+	xfs_fileoff_t		cow_fsb;
+	int			whichfork;
 	struct xfs_bmbt_irec	imap;
-	int			whichfork = XFS_DATA_FORK;
 	struct xfs_iext_cursor	icur;
-	bool			imap_valid;
+	int			retries = 0;
 	int			error = 0;
+	unsigned int		*seq;
 
-	/*
-	 * We have to make sure the cached mapping is within EOF to protect
-	 * against eofblocks trimming on file release leaving us with a stale
-	 * mapping. Otherwise, a page for a subsequent file extending buffered
-	 * write could get picked up by this writeback cycle and written to the
-	 * wrong blocks.
-	 *
-	 * Note that what we really want here is a generic mapping invalidation
-	 * mechanism to protect us from arbitrary extent modifying contexts, not
-	 * just eofblocks.
-	 */
-	xfs_trim_extent_eof(&wpc->imap, ip);
+	if (xfs_is_shutdown(mp))
+		return -EIO;
+
+	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);
 
 	/*
 	 * COW fork blocks can overlap data fork blocks even if the blocks
@@ -346,31 +355,20 @@ xfs_map_blocks(
 	 * against concurrent updates and provides a memory barrier on the way
 	 * out that ensures that we always see the current value.
 	 */
-	imap_valid = offset_fsb >= wpc->imap.br_startoff &&
-		     offset_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount;
-	if (imap_valid &&
-	    (!xfs_inode_has_cow_data(ip) ||
-	     wpc->io_type == XFS_IO_COW ||
-	     wpc->cow_seq == READ_ONCE(ip->i_cowfp->if_seq)))
+	if (xfs_imap_valid(wpc, ip, offset))
 		return 0;
 
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
-
 	/*
 	 * If we don't have a valid map, now it's time to get a new one for this
 	 * offset.  This will convert delayed allocations (including COW ones)
 	 * into real extents.  If we return without a valid map, it means we
 	 * landed in a hole and we skip the block.
 	 */
+retry:
+	cow_fsb = NULLFILEOFF;
+	whichfork = XFS_DATA_FORK;
 	xfs_ilock(ip, XFS_ILOCK_SHARED);
-	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
-	       (ip->i_df.if_flags & XFS_IFEXTENTS));
-	ASSERT(offset <= mp->m_super->s_maxbytes);
-
-	if (offset > mp->m_super->s_maxbytes - count)
-		count = mp->m_super->s_maxbytes - offset;
-	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	ASSERT(!xfs_need_iread_extents(&ip->i_df));
 
 	/*
 	 * Check if this is offset is covered by a COW extents, and if yes use
@@ -380,32 +378,18 @@ xfs_map_blocks(
 	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
 		cow_fsb = imap.br_startoff;
 	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
-		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
+		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
-		/*
-		 * Truncate can race with writeback since writeback doesn't
-		 * take the iolock and truncate decreases the file size before
-		 * it starts truncating the pages between new_size and old_size.
-		 * Therefore, we can end up in the situation where writeback
-		 * gets a CoW fork mapping but the truncate makes the mapping
-		 * invalid and we end up in here trying to get a new mapping.
-		 * bail out here so that we simply never get a valid mapping
-		 * and so we drop the write altogether. The page truncation
-		 * will kill the contents anyway.
-		 */
-		if (offset > i_size_read(inode)) {
-			wpc->io_type = XFS_IO_HOLE;
-			return 0;
-		}
+		whichfork = XFS_COW_FORK;
 
-		wpc->io_type = XFS_IO_COW;
 		goto allocate_blocks;
 	}
 
 	/*
-	 * Map valid and no COW extent in the way?  We're done.
+	 * No COW extent overlap.  Revalidate now that we may have updated
+	 * ->cow_seq.  If the data mapping is still valid, we're done.
 	 */
-	if (imap_valid) {
+	if (xfs_imap_valid(wpc, ip, offset)) {
 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		return 0;
 	}
@@ -417,531 +401,305 @@ xfs_map_blocks(
 	 */
 	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
 		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
+	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
+	/* landed in a hole or beyond EOF? */
 	if (imap.br_startoff > offset_fsb) {
-		/* landed in a hole or beyond EOF */
 		imap.br_blockcount = imap.br_startoff - offset_fsb;
 		imap.br_startoff = offset_fsb;
 		imap.br_startblock = HOLESTARTBLOCK;
-		wpc->io_type = XFS_IO_HOLE;
-	} else {
-		/*
-		 * Truncate to the next COW extent if there is one.  This is the
-		 * only opportunity to do this because we can skip COW fork
-		 * lookups for the subsequent blocks in the mapping; however,
-		 * the requirement to treat the COW range separately remains.
-		 */
-		if (cow_fsb != NULLFILEOFF &&
-		    cow_fsb < imap.br_startoff + imap.br_blockcount)
-			imap.br_blockcount = cow_fsb - imap.br_startoff;
-
-		if (isnullstartblock(imap.br_startblock)) {
-			/* got a delalloc extent */
-			wpc->io_type = XFS_IO_DELALLOC;
-			goto allocate_blocks;
-		}
-
-		if (imap.br_state == XFS_EXT_UNWRITTEN)
-			wpc->io_type = XFS_IO_UNWRITTEN;
-		else
-			wpc->io_type = XFS_IO_OVERWRITE;
+		imap.br_state = XFS_EXT_NORM;
 	}
 
-	wpc->imap = imap;
-	trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
+	/*
+	 * Truncate to the next COW extent if there is one.  This is the only
+	 * opportunity to do this because we can skip COW fork lookups for the
+	 * subsequent blocks in the mapping; however, the requirement to treat
+	 * the COW range separately remains.
+	 */
+	if (cow_fsb != NULLFILEOFF &&
+	    cow_fsb < imap.br_startoff + imap.br_blockcount)
+		imap.br_blockcount = cow_fsb - imap.br_startoff;
+
+	/* got a delalloc extent? */
+	if (imap.br_startblock != HOLESTARTBLOCK &&
+	    isnullstartblock(imap.br_startblock))
+		goto allocate_blocks;
+
+	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
+	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
 	return 0;
 allocate_blocks:
-	error = xfs_iomap_write_allocate(ip, whichfork, offset, &imap,
-			&wpc->cow_seq);
-	if (error)
-		return error;
-	ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
-	       imap.br_startoff + imap.br_blockcount <= cow_fsb);
-	wpc->imap = imap;
-	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
-	return 0;
-}
+	/*
+	 * Convert a delalloc extent to a real one.  The current page is held
+	 * locked so nothing could have removed the block backing offset_fsb,
+	 * although it could have moved from the COW to the data fork by
+	 * another thread.
+	 */
+	if (whichfork == XFS_COW_FORK)
+		seq = &XFS_WPC(wpc)->cow_seq;
+	else
+		seq = &XFS_WPC(wpc)->data_seq;
 
-/*
- * Submit the bio for an ioend. We are passed an ioend with a bio attached to
- * it, and we submit that bio. The ioend may be used for multiple bio
- * submissions, so we only want to allocate an append transaction for the ioend
- * once. In the case of multiple bio submission, each bio will take an IO
- * reference to the ioend to ensure that the ioend completion is only done once
- * all bios have been submitted and the ioend is really done.
- *
- * If @fail is non-zero, it means that we have a situation where some part of
- * the submission process has failed after we have marked paged for writeback
- * and unlocked them. In this situation, we need to fail the bio and ioend
- * rather than submit it to IO. This typically only happens on a filesystem
- * shutdown.
- */
-STATIC int
-xfs_submit_ioend(
-	struct writeback_control *wbc,
-	struct xfs_ioend	*ioend,
-	int			status)
-{
-	/* Convert CoW extents to regular */
-	if (!status && ioend->io_type == XFS_IO_COW) {
+	error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
+			&wpc->iomap, seq);
+	if (error) {
 		/*
-		 * Yuk. This can do memory allocation, but is not a
-		 * transactional operation so everything is done in GFP_KERNEL
-		 * context. That can deadlock, because we hold pages in
-		 * writeback state and GFP_KERNEL allocations can block on them.
-		 * Hence we must operate in nofs conditions here.
+		 * If we failed to find the extent in the COW fork we might have
+		 * raced with a COW to data fork conversion or truncate.
+		 * Restart the lookup to catch the extent in the data fork for
+		 * the former case, but prevent additional retries to avoid
+		 * looping forever for the latter case.
 		 */
-		unsigned nofs_flag;
-
-		nofs_flag = memalloc_nofs_save();
-		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
-				ioend->io_offset, ioend->io_size);
-		memalloc_nofs_restore(nofs_flag);
+		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
+			goto retry;
+		ASSERT(error != -EAGAIN);
+		return error;
 	}
 
-	/* Reserve log space if we might write beyond the on-disk inode size. */
-	if (!status &&
-	    ioend->io_type != XFS_IO_UNWRITTEN &&
-	    xfs_ioend_is_append(ioend) &&
-	    !ioend->io_append_trans)
-		status = xfs_setfilesize_trans_alloc(ioend);
-
-	ioend->io_bio->bi_private = ioend;
-	ioend->io_bio->bi_end_io = xfs_end_bio;
-	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
-
 	/*
-	 * If we are failing the IO now, just mark the ioend with an
-	 * error and finish it. This will run IO completion immediately
-	 * as there is only one reference to the ioend at this point in
-	 * time.
+	 * Due to merging, the returned real extent might be larger than the
+	 * original delalloc one.  Trim the returned extent to the next COW
+	 * boundary again to force a re-lookup.
 	 */
-	if (status) {
-		ioend->io_bio->bi_status = errno_to_blk_status(status);
-		bio_endio(ioend->io_bio);
-		return status;
+	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
+		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
+
+		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
+			wpc->iomap.length = cow_offset - wpc->iomap.offset;
 	}
 
-	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
-	submit_bio(ioend->io_bio);
+	ASSERT(wpc->iomap.offset <= offset);
+	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
+	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
 	return 0;
 }
 
-static struct xfs_ioend *
-xfs_alloc_ioend(
-	struct inode		*inode,
-	unsigned int		type,
-	xfs_off_t		offset,
-	struct block_device	*bdev,
-	sector_t		sector)
-{
-	struct xfs_ioend	*ioend;
-	struct bio		*bio;
-
-	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
-	bio_set_dev(bio, bdev);
-	bio->bi_iter.bi_sector = sector;
-
-	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
-	INIT_LIST_HEAD(&ioend->io_list);
-	ioend->io_type = type;
-	ioend->io_inode = inode;
-	ioend->io_size = 0;
-	ioend->io_offset = offset;
-	INIT_WORK(&ioend->io_work, xfs_end_io);
-	ioend->io_append_trans = NULL;
-	ioend->io_bio = bio;
-	return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to do perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in xfs_destroy_ioend().
- */
-static void
-xfs_chain_bio(
-	struct xfs_ioend	*ioend,
-	struct writeback_control *wbc,
-	struct block_device	*bdev,
-	sector_t		sector)
+static ssize_t
+xfs_writeback_range(
+	struct iomap_writepage_ctx *wpc,
+	struct folio		*folio,
+	u64			offset,
+	unsigned int		len,
+	u64			end_pos)
 {
-	struct bio *new;
-
-	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
-	bio_set_dev(new, bdev);
-	new->bi_iter.bi_sector = sector;
-	bio_chain(ioend->io_bio, new);
-	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
-	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
-	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
-	submit_bio(ioend->io_bio);
-	ioend->io_bio = new;
-}
+	ssize_t			ret;
 
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first, otherwise finish off the current ioend and start another.
- */
-STATIC void
-xfs_add_to_ioend(
-	struct inode		*inode,
-	xfs_off_t		offset,
-	struct page		*page,
-	struct iomap_page	*iop,
-	struct xfs_writepage_ctx *wpc,
-	struct writeback_control *wbc,
-	struct list_head	*iolist)
-{
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
-	unsigned		len = i_blocksize(inode);
-	unsigned		poff = offset & (PAGE_SIZE - 1);
-	sector_t		sector;
-
-	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
-		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
-
-	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
-	    sector != bio_end_sector(wpc->ioend->io_bio) ||
-	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
-		if (wpc->ioend)
-			list_add(&wpc->ioend->io_list, iolist);
-		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset,
-				bdev, sector);
-	}
-
-	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff)) {
-		if (iop)
-			atomic_inc(&iop->write_count);
-		if (bio_full(wpc->ioend->io_bio))
-			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
-		__bio_add_page(wpc->ioend->io_bio, page, len, poff);
-	}
-
-	wpc->ioend->io_size += len;
-}
-
-STATIC void
-xfs_vm_invalidatepage(
-	struct page		*page,
-	unsigned int		offset,
-	unsigned int		length)
-{
-	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
-	iomap_invalidatepage(page, offset, length);
+	ret = xfs_map_blocks(wpc, offset, len);
+	if (!ret)
+		ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
+	if (ret < 0)
+		xfs_discard_folio(folio, offset);
+	return ret;
 }
 
-/*
- * If the page has delalloc blocks on it, we need to punch them out before we
- * invalidate the page. If we don't, we leave a stale delalloc mapping on the
- * inode that can trip up a later direct I/O read operation on the same region.
- *
- * We prevent this by truncating away the delalloc regions on the page. Because
- * they are delalloc, we can do this without needing a transaction. Indeed - if
- * we get ENOSPC errors, we have to be able to do this truncation without a
- * transaction as there is no space left for block reservation (typically why we
- * see a ENOSPC in writeback).
- */
-STATIC void
-xfs_aops_discard_page(
-	struct page		*page)
+static bool
+xfs_ioend_needs_wq_completion(
+	struct iomap_ioend	*ioend)
 {
-	struct inode		*inode = page->mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	loff_t			offset = page_offset(page);
-	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
-	int			error;
+	/* Changing inode size requires a transaction. */
+	if (xfs_ioend_is_append(ioend))
+		return true;
 
-	if (XFS_FORCED_SHUTDOWN(mp))
-		goto out_invalidate;
+	/* Extent manipulation requires a transaction. */
+	if (ioend->io_flags & (IOMAP_IOEND_UNWRITTEN | IOMAP_IOEND_SHARED))
+		return true;
 
-	xfs_alert(mp,
-		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
-			page, ip->i_ino, offset);
+	/* Page cache invalidation cannot be done in irq context. */
+	if (ioend->io_flags & IOMAP_IOEND_DONTCACHE)
+		return true;
 
-	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
-			PAGE_SIZE / i_blocksize(inode));
-	if (error && !XFS_FORCED_SHUTDOWN(mp))
-		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
-out_invalidate:
-	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
+	return false;
 }
 
-/*
- * We implement an immediate ioend submission policy here to avoid needing to
- * chain multiple ioends and hence nest mempool allocations which can violate
- * forward progress guarantees we need to provide. The current ioend we are
- * adding blocks to is cached on the writepage context, and if the new block
- * does not append to the cached ioend it will create a new ioend and cache that
- * instead.
- *
- * If a new ioend is created and cached, the old ioend is returned and queued
- * locally for submission once the entire page is processed or an error has been
- * detected. While ioends are submitted immediately after they are completed,
- * batching optimisations are provided by higher level block plugging.
- *
- * At the end of a writeback pass, there will be a cached ioend remaining on the
- * writepage context that the caller will need to submit.
- */
 static int
-xfs_writepage_map(
-	struct xfs_writepage_ctx *wpc,
-	struct writeback_control *wbc,
-	struct inode		*inode,
-	struct page		*page,
-	uint64_t		end_offset)
+xfs_writeback_submit(
+	struct iomap_writepage_ctx *wpc,
+	int			error)
 {
-	LIST_HEAD(submit_list);
-	struct iomap_page	*iop = to_iomap_page(page);
-	unsigned		len = i_blocksize(inode);
-	struct xfs_ioend	*ioend, *next;
-	uint64_t		file_offset;	/* file offset of page */
-	int			error = 0, count = 0, i;
-
-	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
-	ASSERT(!iop || atomic_read(&iop->write_count) == 0);
-
-	/*
-	 * Walk through the page to find areas to write back. If we run off the
-	 * end of the current map or find the current map invalid, grab a new
-	 * one.
-	 */
-	for (i = 0, file_offset = page_offset(page);
-	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
-	     i++, file_offset += len) {
-		if (iop && !test_bit(i, iop->uptodate))
-			continue;
-
-		error = xfs_map_blocks(wpc, inode, file_offset);
-		if (error)
-			break;
-		if (wpc->io_type == XFS_IO_HOLE)
-			continue;
-		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
-				 &submit_list);
-		count++;
-	}
-
-	ASSERT(wpc->ioend || list_empty(&submit_list));
-	ASSERT(PageLocked(page));
-	ASSERT(!PageWriteback(page));
+	struct iomap_ioend	*ioend = wpc->wb_ctx;
 
 	/*
-	 * On error, we have to fail the ioend here because we may have set
-	 * pages under writeback, we have to make sure we run IO completion to
-	 * mark the error state of the IO appropriately, so we can't cancel the
-	 * ioend directly here. That means we have to mark this page as under
-	 * writeback if we included any blocks from it in the ioend chain so
-	 * that completion treats it correctly.
+	 * Convert CoW extents to regular.
 	 *
-	 * If we didn't include the page in the ioend, the on error we can
-	 * simply discard and unlock it as there are no other users of the page
-	 * now. The caller will still need to trigger submission of outstanding
-	 * ioends on the writepage context so they are treated correctly on
-	 * error.
+	 * We can allocate memory here while doing writeback on behalf of
+	 * memory reclaim.  To avoid memory allocation deadlocks, set the
+	 * task-wide nofs context.
 	 */
-	if (unlikely(error)) {
-		if (!count) {
-			xfs_aops_discard_page(page);
-			ClearPageUptodate(page);
-			unlock_page(page);
-			goto done;
-		}
+	if (!error && (ioend->io_flags & IOMAP_IOEND_SHARED)) {
+		unsigned int	nofs_flag;
 
-		/*
-		 * If the page was not fully cleaned, we need to ensure that the
-		 * higher layers come back to it correctly.  That means we need
-		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
-		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
-		 * so another attempt to write this page in this writeback sweep
-		 * will be made.
-		 */
-		set_page_writeback_keepwrite(page);
-	} else {
-		clear_page_dirty_for_io(page);
-		set_page_writeback(page);
+		nofs_flag = memalloc_nofs_save();
+		error = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
+				ioend->io_offset, ioend->io_size);
+		memalloc_nofs_restore(nofs_flag);
 	}
 
-	unlock_page(page);
-
 	/*
-	 * Preserve the original error if there was one, otherwise catch
-	 * submission errors here and propagate into subsequent ioend
-	 * submissions.
+	 * Send ioends that might require a transaction to the completion wq.
 	 */
-	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
-		int error2;
+	if (xfs_ioend_needs_wq_completion(ioend))
+		ioend->io_bio.bi_end_io = xfs_end_bio;
 
-		list_del_init(&ioend->io_list);
-		error2 = xfs_submit_ioend(wbc, ioend, error);
-		if (error2 && !error)
-			error = error2;
-	}
+	return iomap_ioend_writeback_submit(wpc, error);
+}
 
-	/*
-	 * We can end up here with no error and nothing to write only if we race
-	 * with a partial page truncate on a sub-page block sized filesystem.
-	 */
-	if (!count)
-		end_page_writeback(page);
-done:
-	mapping_set_error(page->mapping, error);
-	return error;
+static const struct iomap_writeback_ops xfs_writeback_ops = {
+	.writeback_range	= xfs_writeback_range,
+	.writeback_submit	= xfs_writeback_submit,
+};
+
+struct xfs_zoned_writepage_ctx {
+	struct iomap_writepage_ctx ctx;
+	struct xfs_open_zone	*open_zone;
+};
+
+static inline struct xfs_zoned_writepage_ctx *
+XFS_ZWPC(struct iomap_writepage_ctx *ctx)
+{
+	return container_of(ctx, struct xfs_zoned_writepage_ctx, ctx);
 }
 
-/*
- * Write out a dirty page.
- *
- * For delalloc space on the page we need to allocate space and flush it.
- * For unwritten space on the page we need to start the conversion to
- * regular allocated space.
- */
-STATIC int
-xfs_do_writepage(
-	struct page		*page,
-	struct writeback_control *wbc,
-	void			*data)
+static int
+xfs_zoned_map_blocks(
+	struct iomap_writepage_ctx *wpc,
+	loff_t			offset,
+	unsigned int		len)
 {
-	struct xfs_writepage_ctx *wpc = data;
-	struct inode		*inode = page->mapping->host;
-	loff_t			offset;
-	uint64_t		end_offset;
-	pgoff_t			end_index;
+	struct xfs_inode	*ip = XFS_I(wpc->inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + len);
+	xfs_filblks_t		count_fsb;
+	struct xfs_bmbt_irec	imap, del;
+	struct xfs_iext_cursor	icur;
+
+	if (xfs_is_shutdown(mp))
+		return -EIO;
 
-	trace_xfs_writepage(inode, page, 0, 0);
+	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);
 
 	/*
-	 * Refuse to write the page out if we are called from reclaim context.
+	 * All dirty data must be covered by delalloc extents.  But truncate
+	 * can remove delalloc extents underneath us or reduce their size.
+	 * Returning a hole tells iomap to not write back any data from this
+	 * range, which is the right thing to do in that case.
 	 *
-	 * This avoids stack overflows when called from deeply used stacks in
-	 * random callers for direct reclaim or memcg reclaim.  We explicitly
-	 * allow reclaim from kswapd as the stack usage there is relatively low.
+	 * Otherwise just tell iomap to treat ranges previously covered by a
+	 * delalloc extent as mapped.  The actual block allocation will be done
+	 * just before submitting the bio.
 	 *
-	 * This should never happen except in the case of a VM regression so
-	 * warn about it.
+	 * This implies we never map outside folios that are locked or marked
+	 * as under writeback, and thus there is no need to check the fork
+	 * sequence count here.
 	 */
-	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
-			PF_MEMALLOC))
-		goto redirty;
-
-	/*
-	 * Given that we do not allow direct reclaim to call us, we should
-	 * never be called while in a filesystem transaction.
-	 */
-	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
-		goto redirty;
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
+		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
+	if (imap.br_startoff > offset_fsb) {
+		imap.br_blockcount = imap.br_startoff - offset_fsb;
+		imap.br_startoff = offset_fsb;
+		imap.br_startblock = HOLESTARTBLOCK;
+		imap.br_state = XFS_EXT_NORM;
+		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, 0);
+		return 0;
+	}
+	end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
+	count_fsb = end_fsb - offset_fsb;
+
+	del = imap;
+	xfs_trim_extent(&del, offset_fsb, count_fsb);
+	xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, &icur, &imap, &del,
+			XFS_BMAPI_REMAP);
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+	wpc->iomap.type = IOMAP_MAPPED;
+	wpc->iomap.flags = IOMAP_F_DIRTY;
+	wpc->iomap.bdev = mp->m_rtdev_targp->bt_bdev;
+	wpc->iomap.offset = offset;
+	wpc->iomap.length = XFS_FSB_TO_B(mp, count_fsb);
+	wpc->iomap.flags = IOMAP_F_ANON_WRITE;
+
+	trace_xfs_zoned_map_blocks(ip, offset, wpc->iomap.length);
+	return 0;
+}
 
-	/*
-	 * Is this page beyond the end of the file?
-	 *
-	 * The page index is less than the end_index, adjust the end_offset
-	 * to the highest offset that this page should represent.
-	 * -----------------------------------------------------
-	 * |			file mapping	       | <EOF> |
-	 * -----------------------------------------------------
-	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
-	 * ^--------------------------------^----------|--------
-	 * |     desired writeback range    |	   see else    |
-	 * ---------------------------------^------------------|
-	 */
-	offset = i_size_read(inode);
-	end_index = offset >> PAGE_SHIFT;
-	if (page->index < end_index)
-		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
-	else {
-		/*
-		 * Check whether the page to write out is beyond or straddles
-		 * i_size or not.
-		 * -------------------------------------------------------
-		 * |		file mapping		        | <EOF>  |
-		 * -------------------------------------------------------
-		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
-		 * ^--------------------------------^-----------|---------
-		 * |				    | Straddles  |
-		 * ---------------------------------^-----------|--------|
-		 */
-		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+static ssize_t
+xfs_zoned_writeback_range(
+	struct iomap_writepage_ctx *wpc,
+	struct folio		*folio,
+	u64			offset,
+	unsigned int		len,
+	u64			end_pos)
+{
+	ssize_t			ret;
 
-		/*
-		 * Skip the page if it is fully outside i_size, e.g. due to a
-		 * truncate operation that is in progress. We must redirty the
-		 * page so that reclaim stops reclaiming it.  Otherwise
-		 * xfs_vm_releasepage() is called on it and gets confused.
-		 *
-		 * Note that the end_index is unsigned long, it would overflow
-		 * if the given offset is greater than 16TB on 32-bit system
-		 * and if we do check the page is fully outside i_size or not
-		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
-		 * will be evaluated to 0.  Hence this page will be redirtied
-		 * and be written out repeatedly which would result in an
-		 * infinite loop, the user program that perform this operation
-		 * will hang.  Instead, we can verify this situation by checking
-		 * if the page to write is totally beyond the i_size or if it's
-		 * offset is just equal to the EOF.
-		 */
-		if (page->index > end_index ||
-		    (page->index == end_index && offset_into_page == 0))
-			goto redirty;
+	ret = xfs_zoned_map_blocks(wpc, offset, len);
+	if (!ret)
+		ret = iomap_add_to_ioend(wpc, folio, offset, end_pos, len);
+	if (ret < 0)
+		xfs_discard_folio(folio, offset);
+	return ret;
+}
 
-		/*
-		 * The page straddles i_size.  It must be zeroed out on each
-		 * and every writepage invocation because it may be mmapped.
-		 * "A file is mapped in multiples of the page size.  For a file
-		 * that is not a multiple of the page size, the remaining
-		 * memory is zeroed when mapped, and writes to that region are
-		 * not written out to the file."
-		 */
-		zero_user_segment(page, offset_into_page, PAGE_SIZE);
+static int
+xfs_zoned_writeback_submit(
+	struct iomap_writepage_ctx *wpc,
+	int			error)
+{
+	struct iomap_ioend	*ioend = wpc->wb_ctx;
 
-		/* Adjust the end_offset to the end of file */
-		end_offset = offset;
+	ioend->io_bio.bi_end_io = xfs_end_bio;
+	if (error) {
+		ioend->io_bio.bi_status = errno_to_blk_status(error);
+		bio_endio(&ioend->io_bio);
+		return error;
 	}
-
-	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
-
-redirty:
-	redirty_page_for_writepage(wbc, page);
-	unlock_page(page);
+	xfs_zone_alloc_and_submit(ioend, &XFS_ZWPC(wpc)->open_zone);
 	return 0;
 }
 
-STATIC int
-xfs_vm_writepage(
-	struct page		*page,
-	struct writeback_control *wbc)
-{
-	struct xfs_writepage_ctx wpc = {
-		.io_type = XFS_IO_HOLE,
-	};
-	int			ret;
-
-	ret = xfs_do_writepage(page, wbc, &wpc);
-	if (wpc.ioend)
-		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
-	return ret;
-}
+static const struct iomap_writeback_ops xfs_zoned_writeback_ops = {
+	.writeback_range	= xfs_zoned_writeback_range,
+	.writeback_submit	= xfs_zoned_writeback_submit,
+};
 
 STATIC int
 xfs_vm_writepages(
 	struct address_space	*mapping,
 	struct writeback_control *wbc)
 {
-	struct xfs_writepage_ctx wpc = {
-		.io_type = XFS_IO_HOLE,
-	};
-	int			ret;
-
-	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
-	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
-	if (wpc.ioend)
-		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
-	return ret;
+	struct xfs_inode	*ip = XFS_I(mapping->host);
+
+	xfs_iflags_clear(ip, XFS_ITRUNCATED);
+
+	if (xfs_is_zoned_inode(ip)) {
+		struct xfs_zoned_writepage_ctx	xc = {
+			.ctx = {
+				.inode	= mapping->host,
+				.wbc	= wbc,
+				.ops	= &xfs_zoned_writeback_ops
+			},
+		};
+		int			error;
+
+		error = iomap_writepages(&xc.ctx);
+		if (xc.open_zone)
+			xfs_open_zone_put(xc.open_zone);
+		return error;
+	} else {
+		struct xfs_writepage_ctx	wpc = {
+			.ctx = {
+				.inode	= mapping->host,
+				.wbc	= wbc,
+				.ops	= &xfs_writeback_ops
+			},
+		};
+
+		return iomap_writepages(&wpc.ctx);
+	}
 }
 
 STATIC int
@@ -949,18 +707,11 @@ xfs_dax_writepages(
 	struct address_space	*mapping,
 	struct writeback_control *wbc)
 {
-	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
-	return dax_writeback_mapping_range(mapping,
-			xfs_find_bdev_for_inode(mapping->host), wbc);
-}
+	struct xfs_inode	*ip = XFS_I(mapping->host);
 
-STATIC int
-xfs_vm_releasepage(
-	struct page		*page,
-	gfp_t			gfp_mask)
-{
-	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
-	return iomap_releasepage(page, gfp_mask);
+	xfs_iflags_clear(ip, XFS_ITRUNCATED);
+	return dax_writeback_mapping_range(mapping,
+			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
 }
 
 STATIC sector_t
@@ -981,61 +732,89 @@ xfs_vm_bmap(
 	 * Since we don't pass back blockdev info, we can't return bmap
 	 * information for rt files either.
	 */
-	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
+	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
 		return 0;
-	return iomap_bmap(mapping, block, &xfs_iomap_ops);
+	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
 }
 
 STATIC int
-xfs_vm_readpage(
+xfs_vm_read_folio(
 	struct file		*unused,
-	struct page		*page)
+	struct folio		*folio)
 {
-	trace_xfs_vm_readpage(page->mapping->host, 1);
-	return iomap_readpage(page, &xfs_iomap_ops);
+	iomap_bio_read_folio(folio, &xfs_read_iomap_ops);
+	return 0;
 }
 
-STATIC int
-xfs_vm_readpages(
-	struct file		*unused,
-	struct address_space	*mapping,
-	struct list_head	*pages,
-	unsigned		nr_pages)
+STATIC void
+xfs_vm_readahead(
+	struct readahead_control	*rac)
 {
-	trace_xfs_vm_readpages(mapping->host, nr_pages);
-	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
+	iomap_bio_readahead(rac, &xfs_read_iomap_ops);
 }
 
 static int
-xfs_iomap_swapfile_activate(
+xfs_vm_swap_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
 {
-	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
-	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
+	struct xfs_inode		*ip = XFS_I(file_inode(swap_file));
+
+	if (xfs_is_zoned_inode(ip))
+		return -EINVAL;
+
+	/*
+	 * Swap file activation can race against concurrent shared extent
+	 * removal in files that have been cloned.  If this happens,
+	 * iomap_swapfile_iter() can fail because it encountered a shared
+	 * extent even though an operation is in progress to remove those
+	 * shared extents.
+	 *
+	 * This race becomes problematic when we defer extent removal
+	 * operations beyond the end of a syscall (i.e. use async background
+	 * processing algorithms).  Users think the extents are no longer
+	 * shared, but iomap_swapfile_iter() still sees them as shared
+	 * because the refcountbt entries for the extents being removed have
+	 * not yet been updated.  Hence the swapon call fails unexpectedly.
+	 *
+	 * The race condition is currently most obvious from the unlink()
+	 * operation as extent removal is deferred until after the last
+	 * reference to the inode goes away.  We then process the extent
+	 * removal asynchronously, hence triggering the "syscall completed but
+	 * work not done" condition mentioned above.  To close this race
+	 * window, we need to flush any pending inodegc operations to ensure
+	 * they have updated the refcountbt records before we try to map the
+	 * swapfile.
+	 */
+	xfs_inodegc_flush(ip->i_mount);
+
+	/*
+	 * Direct the swap code to the correct block device when this file
+	 * sits on the RT device.
+	 */
+	sis->bdev = xfs_inode_buftarg(ip)->bt_bdev;
+
+	return iomap_swapfile_activate(sis, swap_file, span,
+			&xfs_read_iomap_ops);
 }
 
 const struct address_space_operations xfs_address_space_operations = {
-	.readpage		= xfs_vm_readpage,
-	.readpages		= xfs_vm_readpages,
-	.writepage		= xfs_vm_writepage,
+	.read_folio		= xfs_vm_read_folio,
+	.readahead		= xfs_vm_readahead,
 	.writepages		= xfs_vm_writepages,
-	.set_page_dirty		= iomap_set_page_dirty,
-	.releasepage		= xfs_vm_releasepage,
-	.invalidatepage		= xfs_vm_invalidatepage,
+	.dirty_folio		= iomap_dirty_folio,
+	.release_folio		= iomap_release_folio,
+	.invalidate_folio	= iomap_invalidate_folio,
 	.bmap			= xfs_vm_bmap,
-	.direct_IO		= noop_direct_IO,
-	.migratepage		= iomap_migrate_page,
+	.migrate_folio		= filemap_migrate_folio,
 	.is_partially_uptodate  = iomap_is_partially_uptodate,
-	.error_remove_page	= generic_error_remove_page,
-	.swap_activate		= xfs_iomap_swapfile_activate,
+	.error_remove_folio	= generic_error_remove_folio,
+	.swap_activate		= xfs_vm_swap_activate,
 };
 
 const struct address_space_operations xfs_dax_aops = {
 	.writepages		= xfs_dax_writepages,
-	.direct_IO		= noop_direct_IO,
-	.set_page_dirty		= noop_set_page_dirty,
-	.invalidatepage		= noop_invalidatepage,
-	.swap_activate		= xfs_iomap_swapfile_activate,
+	.dirty_folio		= noop_dirty_folio,
+	.swap_activate		= xfs_vm_swap_activate,
 };
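
Note on the new XFS_WPC() helper added at the top of the diff: the XFS-private
writeback context now embeds the generic iomap_writepage_ctx by value and
recovers its private fields with container_of(). A minimal, self-contained
sketch of that embed-and-recover pattern follows; base_ctx and fs_ctx are
hypothetical stand-ins for iomap_writepage_ctx and xfs_writepage_ctx, and the
container_of macro is reproduced here only so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

/* Generic base context, standing in for struct iomap_writepage_ctx. */
struct base_ctx {
	int		inode_id;
};

/* Filesystem-private wrapper, standing in for struct xfs_writepage_ctx. */
struct fs_ctx {
	struct base_ctx	ctx;		/* embedded by value, not a pointer */
	unsigned int	data_seq;
	unsigned int	cow_seq;
};

/* Recover the wrapper from a pointer to the embedded member. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct fs_ctx *FS_CTX(struct base_ctx *ctx)
{
	return container_of(ctx, struct fs_ctx, ctx);
}

int main(void)
{
	struct fs_ctx wpc = { .data_seq = 7, .cow_seq = 9 };
	struct base_ctx *base = &wpc.ctx;	/* what generic code passes around */

	/* Private state is reachable again from the generic pointer. */
	printf("data_seq=%u cow_seq=%u\n",
	       FS_CTX(base)->data_seq, FS_CTX(base)->cow_seq);
	return 0;
}

This only works because the generic struct is the embedded member; storing a
pointer to it instead would break the pointer arithmetic container_of() relies
on.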
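The rewritten xfs_end_bio()/xfs_end_io() pair queues completed ioends on a
per-inode list under an irq-safe lock and schedules one worker only when the
list goes from empty to non-empty; the worker then grabs the whole batch in a
single critical section and processes it outside the lock. A compilable sketch
of that handoff, with a pthread mutex standing in for spin_lock_irqsave() and
all names hypothetical:

#include <pthread.h>
#include <stdio.h>

struct ioend {
	struct ioend	*next;
	long		offset;
};

static pthread_mutex_t	ioend_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ioend	*ioend_list;

/* Completion side: cheap, never does the real work. */
static int ioend_queue(struct ioend *io)
{
	int need_worker;

	pthread_mutex_lock(&ioend_lock);
	need_worker = (ioend_list == NULL);	/* first entry schedules work */
	io->next = ioend_list;
	ioend_list = io;
	pthread_mutex_unlock(&ioend_lock);
	return need_worker;
}

/* Worker side: take the whole list at once, process outside the lock. */
static void ioend_worker(void)
{
	struct ioend *batch;

	pthread_mutex_lock(&ioend_lock);
	batch = ioend_list;
	ioend_list = NULL;
	pthread_mutex_unlock(&ioend_lock);

	while (batch) {
		struct ioend *io = batch;

		batch = io->next;
		printf("completing ioend at %ld\n", io->offset);
	}
}

int main(void)
{
	struct ioend a = { .offset = 0 }, b = { .offset = 4096 };
	int need = 0;

	need |= ioend_queue(&a);	/* first append: schedules the worker */
	need |= ioend_queue(&b);	/* second append: no new scheduling */
	if (need)
		ioend_worker();		/* stands in for the queued work item */
	return 0;
}

Batching like this is what lets the real code sort and merge contiguous ioends
before completion, reducing the number of transactions per writeback pass.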
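xfs_imap_valid() in the diff revalidates a cached mapping against per-fork
sequence counters: every extent-tree modification bumps if_seq, and a cached
mapping remembers the value it was built against. A minimal model of the idea,
assuming a hypothetical single-fork layout:

#include <stdio.h>

struct fork {
	unsigned int	if_seq;		/* bumped on every extent change */
};

struct cached_map {
	long		offset;
	long		length;
	unsigned int	seq;		/* fork->if_seq when map was built */
};

static int map_valid(const struct cached_map *map, const struct fork *fork,
		     long offset)
{
	if (offset < map->offset || offset >= map->offset + map->length)
		return 0;		/* offset outside the cached range */
	return map->seq == fork->if_seq;	/* stale if the fork changed */
}

int main(void)
{
	struct fork df = { .if_seq = 1 };
	struct cached_map map = { .offset = 0, .length = 1 << 20, .seq = 1 };

	printf("valid before change: %d\n", map_valid(&map, &df, 4096));
	df.if_seq++;			/* concurrent extent modification */
	printf("valid after change:  %d\n", map_valid(&map, &df, 4096));
	return 0;
}

The real code keeps two counters (data_seq and cow_seq) because a COW fork
change can invalidate a data fork mapping even when the data fork itself did
not move.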
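For zoned files, xfs_zoned_map_blocks() deliberately returns a mapping with no
block address (IOMAP_F_ANON_WRITE) and leaves the placement decision to
xfs_zoned_writeback_submit(), which allocates sequentially from an open zone at
submission time. A toy sketch of that mapping/allocation split, with all names
and numbers hypothetical:

#include <stdio.h>

struct write_req {
	long	file_off;	/* known at mapping time */
	long	len;
	long	disk_block;	/* chosen only at submit time */
};

static long	zone_write_ptr = 1000;	/* next free block in the open zone */

static void map_range(struct write_req *req, long off, long len)
{
	req->file_off = off;
	req->len = len;
	req->disk_block = -1;	/* "anonymous" write: no address yet */
}

static void alloc_and_submit(struct write_req *req)
{
	req->disk_block = zone_write_ptr;	/* sequential zone allocation */
	zone_write_ptr += req->len / 512;
	printf("off %ld -> block %ld\n", req->file_off, req->disk_block);
}

int main(void)
{
	struct write_req a, b;

	map_range(&a, 0, 4096);
	map_range(&b, 4096, 4096);
	alloc_and_submit(&a);	/* on-disk order decided here, not earlier */
	alloc_and_submit(&b);
	return 0;
}

Deferring the address until submit is what makes the completion side record
the actually written sector (ioend->io_sector) before xfs_zoned_end_io() maps
the blocks back into the file.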
