Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c	| 2794
1 file changed, 1524 insertions(+), 1270 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 1b2472a46e46..47edf3041631 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1,130 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it would be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" -#include <linux/stddef.h> -#include <linux/errno.h> -#include <linux/gfp.h> -#include <linux/pagemap.h> -#include <linux/init.h> -#include <linux/vmalloc.h> -#include <linux/bio.h> -#include <linux/sysctl.h> -#include <linux/proc_fs.h> -#include <linux/workqueue.h> -#include <linux/percpu.h> -#include <linux/blkdev.h> -#include <linux/hash.h> -#include <linux/kthread.h> -#include <linux/migrate.h> #include <linux/backing-dev.h> -#include <linux/freezer.h> +#include <linux/dax.h> -#include "xfs_sb.h" -#include "xfs_log.h" -#include "xfs_ag.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_trace.h" +#include "xfs_log.h" +#include "xfs_log_recover.h" +#include "xfs_log_priv.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" +#include "xfs_errortag.h" +#include "xfs_error.h" +#include "xfs_ag.h" +#include "xfs_buf_mem.h" +#include "xfs_notify_failure.h" -static kmem_zone_t *xfs_buf_zone; - -static struct workqueue_struct *xfslogd_workqueue; - -#ifdef XFS_BUF_LOCK_TRACKING -# define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid) -# define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1) -# define XB_GET_OWNER(bp) ((bp)->b_last_holder) -#else -# define XB_SET_OWNER(bp) do { } while (0) -# define XB_CLEAR_OWNER(bp) do { } while (0) -# define XB_GET_OWNER(bp) do { } while (0) -#endif - -#define xb_to_gfp(flags) \ - ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN) - - -static inline int -xfs_buf_is_vmapped( - struct xfs_buf *bp) -{ - /* - * Return true if the buffer is vmapped. - * - * b_addr is null if the buffer is not mapped, but the code is clever - * enough to know it doesn't have to map a single page, so the check has - * to be both for b_addr and bp->b_page_count > 1. - */ - return bp->b_addr && bp->b_page_count > 1; -} - -static inline int -xfs_buf_vmap_len( - struct xfs_buf *bp) -{ - return (bp->b_page_count * PAGE_SIZE) - bp->b_offset; -} +struct kmem_cache *xfs_buf_cache; /* - * xfs_buf_lru_add - add a buffer to the LRU. + * Locking orders * - * The LRU takes a new reference to the buffer so that it will only be freed - * once the shrinker takes the buffer off the LRU. 
- */ -STATIC void -xfs_buf_lru_add( - struct xfs_buf *bp) -{ - struct xfs_buftarg *btp = bp->b_target; - - spin_lock(&btp->bt_lru_lock); - if (list_empty(&bp->b_lru)) { - atomic_inc(&bp->b_hold); - list_add_tail(&bp->b_lru, &btp->bt_lru); - btp->bt_lru_nr++; - bp->b_lru_flags &= ~_XBF_LRU_DISPOSE; - } - spin_unlock(&btp->bt_lru_lock); -} - -/* - * xfs_buf_lru_del - remove a buffer from the LRU + * xfs_buf_stale: + * b_sema (caller holds) + * b_lock + * lru_lock + * + * xfs_buf_rele: + * b_lock + * lru_lock * - * The unlocked check is safe here because it only occurs when there are not - * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there - * to optimise the shrinker removing the buffer from the LRU and calling - * xfs_buf_free(). i.e. it removes an unnecessary round trip on the - * bt_lru_lock. + * xfs_buftarg_drain_rele + * lru_lock + * b_lock (trylock due to inversion) + * + * xfs_buftarg_isolate + * lru_lock + * b_lock (trylock due to inversion) */ -STATIC void -xfs_buf_lru_del( - struct xfs_buf *bp) -{ - struct xfs_buftarg *btp = bp->b_target; - if (list_empty(&bp->b_lru)) - return; +static void xfs_buf_submit(struct xfs_buf *bp); +static int xfs_buf_iowait(struct xfs_buf *bp); - spin_lock(&btp->bt_lru_lock); - if (!list_empty(&bp->b_lru)) { - list_del_init(&bp->b_lru); - btp->bt_lru_nr--; - } - spin_unlock(&btp->bt_lru_lock); +static inline bool xfs_buf_is_uncached(struct xfs_buf *bp) +{ + return bp->b_rhash_key == XFS_BUF_DADDR_NULL; } /* @@ -150,452 +78,485 @@ xfs_buf_stale( */ bp->b_flags &= ~_XBF_DELWRI_Q; - atomic_set(&(bp)->b_lru_ref, 0); - if (!list_empty(&bp->b_lru)) { - struct xfs_buftarg *btp = bp->b_target; + spin_lock(&bp->b_lock); + atomic_set(&bp->b_lru_ref, 0); + if (!(bp->b_state & XFS_BSTATE_DISPOSE) && + (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru))) + bp->b_hold--; - spin_lock(&btp->bt_lru_lock); - if (!list_empty(&bp->b_lru) && - !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) { - list_del_init(&bp->b_lru); - btp->bt_lru_nr--; - atomic_dec(&bp->b_hold); - } - spin_unlock(&btp->bt_lru_lock); - } - ASSERT(atomic_read(&bp->b_hold) >= 1); + ASSERT(bp->b_hold >= 1); + spin_unlock(&bp->b_lock); +} + +static void +xfs_buf_free_callback( + struct callback_head *cb) +{ + struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu); + + if (bp->b_maps != &bp->__b_map) + kfree(bp->b_maps); + kmem_cache_free(xfs_buf_cache, bp); +} + +static void +xfs_buf_free( + struct xfs_buf *bp) +{ + unsigned int size = BBTOB(bp->b_length); + + might_sleep(); + trace_xfs_buf_free(bp, _RET_IP_); + + ASSERT(list_empty(&bp->b_lru)); + + if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE) + mm_account_reclaimed_pages(howmany(size, PAGE_SHIFT)); + + if (is_vmalloc_addr(bp->b_addr)) + vfree(bp->b_addr); + else if (bp->b_flags & _XBF_KMEM) + kfree(bp->b_addr); + else + folio_put(virt_to_folio(bp->b_addr)); + + call_rcu(&bp->b_rcu, xfs_buf_free_callback); } static int -xfs_buf_get_maps( +xfs_buf_alloc_kmem( struct xfs_buf *bp, - int map_count) + size_t size, + gfp_t gfp_mask) { - ASSERT(bp->b_maps == NULL); - bp->b_map_count = map_count; + ASSERT(is_power_of_2(size)); + ASSERT(size < PAGE_SIZE); - if (map_count == 1) { - bp->b_maps = &bp->__b_map; - return 0; - } + bp->b_addr = kmalloc(size, gfp_mask | __GFP_NOFAIL); + if (!bp->b_addr) + return -ENOMEM; - bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), - KM_NOFS); - if (!bp->b_maps) - return ENOMEM; + /* + * Slab guarantees that we get back naturally aligned allocations for + * power of two sizes. 
Keep this check as the canary in the coal mine + * if anything changes in slab. + */ + if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)bp->b_addr, size))) { + kfree(bp->b_addr); + bp->b_addr = NULL; + return -ENOMEM; + } + bp->b_flags |= _XBF_KMEM; + trace_xfs_buf_backing_kmem(bp, _RET_IP_); return 0; } /* - * Frees b_pages if it was allocated. + * Allocate backing memory for a buffer. + * + * For tmpfs-backed buffers used by in-memory btrees this directly maps the + * tmpfs page cache folios. + * + * For real file system buffers there are three different kinds backing memory: + * + * The first type backs the buffer by a kmalloc allocation. This is done for + * less than PAGE_SIZE allocations to avoid wasting memory. + * + * The second type is a single folio buffer - this may be a high order folio or + * just a single page sized folio, but either way they get treated the same way + * by the rest of the code - the buffer memory spans a single contiguous memory + * region that we don't have to map and unmap to access the data directly. + * + * The third type of buffer is the vmalloc()d buffer. This provides the buffer + * with the required contiguous memory region but backed by discontiguous + * physical pages. */ -static void -xfs_buf_free_maps( - struct xfs_buf *bp) +static int +xfs_buf_alloc_backing_mem( + struct xfs_buf *bp, + xfs_buf_flags_t flags) { - if (bp->b_maps != &bp->__b_map) { - kmem_free(bp->b_maps); - bp->b_maps = NULL; + size_t size = BBTOB(bp->b_length); + gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN; + struct folio *folio; + + if (xfs_buftarg_is_mem(bp->b_target)) + return xmbuf_map_backing_mem(bp); + + /* Assure zeroed buffer for non-read cases. */ + if (!(flags & XBF_READ)) + gfp_mask |= __GFP_ZERO; + + if (flags & XBF_READ_AHEAD) + gfp_mask |= __GFP_NORETRY; + + /* + * For buffers smaller than PAGE_SIZE use a kmalloc allocation if that + * is properly aligned. The slab allocator now guarantees an aligned + * allocation for all power of two sizes, which matches most of the + * smaller than PAGE_SIZE buffers used by XFS. + */ + if (size < PAGE_SIZE && is_power_of_2(size)) + return xfs_buf_alloc_kmem(bp, size, gfp_mask); + + /* + * Don't bother with the retry loop for single PAGE allocations: vmalloc + * won't do any better. + */ + if (size <= PAGE_SIZE) + gfp_mask |= __GFP_NOFAIL; + + /* + * Optimistically attempt a single high order folio allocation for + * larger than PAGE_SIZE buffers. + * + * Allocating a high order folio makes the assumption that buffers are a + * power-of-2 size, matching the power-of-2 folios sizes available. + * + * The exception here are user xattr data buffers, which can be arbitrarily + * sized up to 64kB plus structure metadata, skip straight to the vmalloc + * path for them instead of wasting memory here. 
+ */ + if (size > PAGE_SIZE) { + if (!is_power_of_2(size)) + goto fallback; + gfp_mask &= ~__GFP_DIRECT_RECLAIM; + gfp_mask |= __GFP_NORETRY; + } + folio = folio_alloc(gfp_mask, get_order(size)); + if (!folio) { + if (size <= PAGE_SIZE) + return -ENOMEM; + trace_xfs_buf_backing_fallback(bp, _RET_IP_); + goto fallback; + } + bp->b_addr = folio_address(folio); + trace_xfs_buf_backing_folio(bp, _RET_IP_); + return 0; + +fallback: + for (;;) { + bp->b_addr = __vmalloc(size, gfp_mask); + if (bp->b_addr) + break; + if (flags & XBF_READ_AHEAD) + return -ENOMEM; + XFS_STATS_INC(bp->b_mount, xb_page_retries); + memalloc_retry_wait(gfp_mask); } + + trace_xfs_buf_backing_vmalloc(bp, _RET_IP_); + return 0; } -struct xfs_buf * -_xfs_buf_alloc( +static int +xfs_buf_alloc( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, - xfs_buf_flags_t flags) + xfs_buf_flags_t flags, + struct xfs_buf **bpp) { struct xfs_buf *bp; int error; int i; - bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); - if (unlikely(!bp)) - return NULL; + *bpp = NULL; + bp = kmem_cache_zalloc(xfs_buf_cache, + GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); /* * We don't want certain flags to appear in b_flags unless they are * specifically set by later operations on the buffer. */ - flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); + flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); + + /* + * A new buffer is held and locked by the owner. This ensures that the + * buffer is owned by the caller and racing RCU lookups right after + * inserting into the hash table are safe (and will have to wait for + * the unlock to do anything non-trivial). + */ + bp->b_hold = 1; + sema_init(&bp->b_sema, 0); /* held, no waiters */ - atomic_set(&bp->b_hold, 1); + spin_lock_init(&bp->b_lock); atomic_set(&bp->b_lru_ref, 1); init_completion(&bp->b_iowait); INIT_LIST_HEAD(&bp->b_lru); INIT_LIST_HEAD(&bp->b_list); - RB_CLEAR_NODE(&bp->b_rbnode); - sema_init(&bp->b_sema, 0); /* held, no waiters */ - XB_SET_OWNER(bp); + INIT_LIST_HEAD(&bp->b_li_list); bp->b_target = target; + bp->b_mount = target->bt_mount; bp->b_flags = flags; - - /* - * Set length and io_length to the same value initially. - * I/O routines should use io_length, which will be the same in - * most cases but may be reset (e.g. XFS recovery). - */ - error = xfs_buf_get_maps(bp, nmaps); - if (error) { - kmem_zone_free(xfs_buf_zone, bp); - return NULL; - } - - bp->b_bn = map[0].bm_bn; + bp->b_rhash_key = map[0].bm_bn; bp->b_length = 0; + bp->b_map_count = nmaps; + if (nmaps == 1) + bp->b_maps = &bp->__b_map; + else + bp->b_maps = kcalloc(nmaps, sizeof(struct xfs_buf_map), + GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); for (i = 0; i < nmaps; i++) { bp->b_maps[i].bm_bn = map[i].bm_bn; bp->b_maps[i].bm_len = map[i].bm_len; bp->b_length += map[i].bm_len; } - bp->b_io_length = bp->b_length; atomic_set(&bp->b_pin_count, 0); init_waitqueue_head(&bp->b_waiters); - XFS_STATS_INC(xb_create); + XFS_STATS_INC(bp->b_mount, xb_create); trace_xfs_buf_init(bp, _RET_IP_); - return bp; + error = xfs_buf_alloc_backing_mem(bp, flags); + if (error) { + xfs_buf_free(bp); + return error; + } + + *bpp = bp; + return 0; } /* - * Allocate a page array capable of holding a specified number - * of pages, and point the page buf at it. 
+ * Finding and Reading Buffers */ -STATIC int -_xfs_buf_get_pages( - xfs_buf_t *bp, - int page_count, - xfs_buf_flags_t flags) +static int +_xfs_buf_obj_cmp( + struct rhashtable_compare_arg *arg, + const void *obj) { - /* Make sure that we have a page list */ - if (bp->b_pages == NULL) { - bp->b_page_count = page_count; - if (page_count <= XB_PAGES) { - bp->b_pages = bp->b_page_array; - } else { - bp->b_pages = kmem_alloc(sizeof(struct page *) * - page_count, KM_NOFS); - if (bp->b_pages == NULL) - return -ENOMEM; - } - memset(bp->b_pages, 0, sizeof(struct page *) * page_count); + const struct xfs_buf_map *map = arg->key; + const struct xfs_buf *bp = obj; + + /* + * The key hashing in the lookup path depends on the key being the + * first element of the compare_arg, make sure to assert this. + */ + BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0); + + if (bp->b_rhash_key != map->bm_bn) + return 1; + + if (unlikely(bp->b_length != map->bm_len)) { + /* + * found a block number match. If the range doesn't + * match, the only way this is allowed is if the buffer + * in the cache is stale and the transaction that made + * it stale has not yet committed. i.e. we are + * reallocating a busy extent. Skip this buffer and + * continue searching for an exact match. + * + * Note: If we're scanning for incore buffers to stale, don't + * complain if we find non-stale buffers. + */ + if (!(map->bm_flags & XBM_LIVESCAN)) + ASSERT(bp->b_flags & XBF_STALE); + return 1; } return 0; } -/* - * Frees b_pages if it was allocated. - */ -STATIC void -_xfs_buf_free_pages( - xfs_buf_t *bp) +static const struct rhashtable_params xfs_buf_hash_params = { + .min_size = 32, /* empty AGs have minimal footprint */ + .nelem_hint = 16, + .key_len = sizeof(xfs_daddr_t), + .key_offset = offsetof(struct xfs_buf, b_rhash_key), + .head_offset = offsetof(struct xfs_buf, b_rhash_head), + .automatic_shrinking = true, + .obj_cmpfn = _xfs_buf_obj_cmp, +}; + +int +xfs_buf_cache_init( + struct xfs_buf_cache *bch) { - if (bp->b_pages != bp->b_page_array) { - kmem_free(bp->b_pages); - bp->b_pages = NULL; - } + return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params); } -/* - * Releases the specified buffer. - * - * The modification state of any associated pages is left unchanged. - * The buffer most not be on any hash - use xfs_buf_rele instead for - * hashed and refcounted buffers - */ void -xfs_buf_free( - xfs_buf_t *bp) +xfs_buf_cache_destroy( + struct xfs_buf_cache *bch) { - trace_xfs_buf_free(bp, _RET_IP_); - - ASSERT(list_empty(&bp->b_lru)); - - if (bp->b_flags & _XBF_PAGES) { - uint i; - - if (xfs_buf_is_vmapped(bp)) - vm_unmap_ram(bp->b_addr - bp->b_offset, - bp->b_page_count); - - for (i = 0; i < bp->b_page_count; i++) { - struct page *page = bp->b_pages[i]; - - __free_page(page); - } - } else if (bp->b_flags & _XBF_KMEM) - kmem_free(bp->b_addr); - _xfs_buf_free_pages(bp); - xfs_buf_free_maps(bp); - kmem_zone_free(xfs_buf_zone, bp); + rhashtable_destroy(&bch->bc_hash); } -/* - * Allocates all the pages for buffer in question and builds it's page list. 
- */ -STATIC int -xfs_buf_allocate_memory( - xfs_buf_t *bp, - uint flags) -{ - size_t size; - size_t nbytes, offset; - gfp_t gfp_mask = xb_to_gfp(flags); - unsigned short page_count, i; - xfs_off_t start, end; - int error; +static int +xfs_buf_map_verify( + struct xfs_buftarg *btp, + struct xfs_buf_map *map) +{ + /* Check for IOs smaller than the sector size / not sector aligned */ + ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize)); + ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask)); /* - * for buffers that are contained within a single page, just allocate - * the memory from the heap - there's no need for the complexity of - * page arrays to keep allocation down to order 0. + * Corrupted block numbers can get through to here, unfortunately, so we + * have to check that the buffer falls within the filesystem bounds. */ - size = BBTOB(bp->b_length); - if (size < PAGE_SIZE) { - bp->b_addr = kmem_alloc(size, KM_NOFS); - if (!bp->b_addr) { - /* low memory - use alloc_page loop instead */ - goto use_alloc_page; - } + if (map->bm_bn < 0 || map->bm_bn >= btp->bt_nr_sectors) { + xfs_alert(btp->bt_mount, + "%s: daddr 0x%llx out of range, EOFS 0x%llx", + __func__, map->bm_bn, btp->bt_nr_sectors); + WARN_ON(1); + return -EFSCORRUPTED; + } + return 0; +} - if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) != - ((unsigned long)bp->b_addr & PAGE_MASK)) { - /* b_addr spans two pages - use alloc_page instead */ - kmem_free(bp->b_addr); - bp->b_addr = NULL; - goto use_alloc_page; +static int +xfs_buf_find_lock( + struct xfs_buf *bp, + xfs_buf_flags_t flags) +{ + if (flags & XBF_TRYLOCK) { + if (!xfs_buf_trylock(bp)) { + XFS_STATS_INC(bp->b_mount, xb_busy_locked); + return -EAGAIN; } - bp->b_offset = offset_in_page(bp->b_addr); - bp->b_pages = bp->b_page_array; - bp->b_pages[0] = virt_to_page(bp->b_addr); - bp->b_page_count = 1; - bp->b_flags |= _XBF_KMEM; - return 0; + } else { + xfs_buf_lock(bp); + XFS_STATS_INC(bp->b_mount, xb_get_locked_waited); } -use_alloc_page: - start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT; - end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1) - >> PAGE_SHIFT; - page_count = end - start; - error = _xfs_buf_get_pages(bp, page_count, flags); - if (unlikely(error)) - return error; - - offset = bp->b_offset; - bp->b_flags |= _XBF_PAGES; - - for (i = 0; i < bp->b_page_count; i++) { - struct page *page; - uint retries = 0; -retry: - page = alloc_page(gfp_mask); - if (unlikely(page == NULL)) { - if (flags & XBF_READ_AHEAD) { - bp->b_page_count = i; - error = ENOMEM; - goto out_free_pages; - } - - /* - * This could deadlock. - * - * But until all the XFS lowlevel code is revamped to - * handle buffer allocation failures we can't do much. - */ - if (!(++retries % 100)) - xfs_err(NULL, - "possible memory allocation deadlock in %s (mode:0x%x)", - __func__, gfp_mask); - - XFS_STATS_INC(xb_page_retries); - congestion_wait(BLK_RW_ASYNC, HZ/50); - goto retry; + /* + * if the buffer is stale, clear all the external state associated with + * it. We need to keep flags such as how we allocated the buffer memory + * intact here. 
+ */ + if (bp->b_flags & XBF_STALE) { + if (flags & XBF_LIVESCAN) { + xfs_buf_unlock(bp); + return -ENOENT; } - - XFS_STATS_INC(xb_page_found); - - nbytes = min_t(size_t, size, PAGE_SIZE - offset); - size -= nbytes; - bp->b_pages[i] = page; - offset = 0; + ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); + bp->b_flags &= _XBF_KMEM; + bp->b_ops = NULL; } return 0; +} -out_free_pages: - for (i = 0; i < bp->b_page_count; i++) - __free_page(bp->b_pages[i]); - return error; +static bool +xfs_buf_try_hold( + struct xfs_buf *bp) +{ + spin_lock(&bp->b_lock); + if (bp->b_hold == 0) { + spin_unlock(&bp->b_lock); + return false; + } + bp->b_hold++; + spin_unlock(&bp->b_lock); + return true; } -/* - * Map buffer into kernel address-space if necessary. - */ -STATIC int -_xfs_buf_map_pages( - xfs_buf_t *bp, - uint flags) -{ - ASSERT(bp->b_flags & _XBF_PAGES); - if (bp->b_page_count == 1) { - /* A single page buffer is always mappable */ - bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset; - } else if (flags & XBF_UNMAPPED) { - bp->b_addr = NULL; - } else { - int retried = 0; +static inline int +xfs_buf_lookup( + struct xfs_buf_cache *bch, + struct xfs_buf_map *map, + xfs_buf_flags_t flags, + struct xfs_buf **bpp) +{ + struct xfs_buf *bp; + int error; - do { - bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count, - -1, PAGE_KERNEL); - if (bp->b_addr) - break; - vm_unmap_aliases(); - } while (retried++ <= 1); + rcu_read_lock(); + bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params); + if (!bp || !xfs_buf_try_hold(bp)) { + rcu_read_unlock(); + return -ENOENT; + } + rcu_read_unlock(); - if (!bp->b_addr) - return -ENOMEM; - bp->b_addr += bp->b_offset; + error = xfs_buf_find_lock(bp, flags); + if (error) { + xfs_buf_rele(bp); + return error; } + trace_xfs_buf_find(bp, flags, _RET_IP_); + *bpp = bp; return 0; } /* - * Finding and Reading Buffers + * Insert the new_bp into the hash table. This consumes the perag reference + * taken for the lookup regardless of the result of the insert. */ - -/* - * Look up, and creates if absent, a lockable buffer for - * a given range of an inode. The buffer is returned - * locked. No I/O is implied by this call. - */ -xfs_buf_t * -_xfs_buf_find( +static int +xfs_buf_find_insert( struct xfs_buftarg *btp, + struct xfs_buf_cache *bch, + struct xfs_perag *pag, + struct xfs_buf_map *cmap, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, - xfs_buf_t *new_bp) + struct xfs_buf **bpp) { - size_t numbytes; - struct xfs_perag *pag; - struct rb_node **rbp; - struct rb_node *parent; - xfs_buf_t *bp; - xfs_daddr_t blkno = map[0].bm_bn; - xfs_daddr_t eofs; - int numblks = 0; - int i; - - for (i = 0; i < nmaps; i++) - numblks += map[i].bm_len; - numbytes = BBTOB(numblks); - - /* Check for IOs smaller than the sector size / not sector aligned */ - ASSERT(!(numbytes < (1 << btp->bt_sshift))); - ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_smask)); + struct xfs_buf *new_bp; + struct xfs_buf *bp; + int error; - /* - * Corrupted block numbers can get through to here, unfortunately, so we - * have to check that the buffer falls within the filesystem bounds. - */ - eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); - if (blkno >= eofs) { - /* - * XXX (dgc): we should really be returning EFSCORRUPTED here, - * but none of the higher level infrastructure supports - * returning a specific error on buffer lookup failures. 
- */ - xfs_alert(btp->bt_mount, - "%s: Block out of range: block 0x%llx, EOFS 0x%llx ", - __func__, blkno, eofs); - WARN_ON(1); - return NULL; + error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp); + if (error) + goto out_drop_pag; + + /* The new buffer keeps the perag reference until it is freed. */ + new_bp->b_pag = pag; + + rcu_read_lock(); + bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash, + &new_bp->b_rhash_head, xfs_buf_hash_params); + if (IS_ERR(bp)) { + rcu_read_unlock(); + error = PTR_ERR(bp); + goto out_free_buf; } - - /* get tree root */ - pag = xfs_perag_get(btp->bt_mount, - xfs_daddr_to_agno(btp->bt_mount, blkno)); - - /* walk tree */ - spin_lock(&pag->pag_buf_lock); - rbp = &pag->pag_buf_tree.rb_node; - parent = NULL; - bp = NULL; - while (*rbp) { - parent = *rbp; - bp = rb_entry(parent, struct xfs_buf, b_rbnode); - - if (blkno < bp->b_bn) - rbp = &(*rbp)->rb_left; - else if (blkno > bp->b_bn) - rbp = &(*rbp)->rb_right; - else { - /* - * found a block number match. If the range doesn't - * match, the only way this is allowed is if the buffer - * in the cache is stale and the transaction that made - * it stale has not yet committed. i.e. we are - * reallocating a busy extent. Skip this buffer and - * continue searching to the right for an exact match. - */ - if (bp->b_length != numblks) { - ASSERT(bp->b_flags & XBF_STALE); - rbp = &(*rbp)->rb_right; - continue; - } - atomic_inc(&bp->b_hold); - goto found; - } + if (bp && xfs_buf_try_hold(bp)) { + /* found an existing buffer */ + rcu_read_unlock(); + error = xfs_buf_find_lock(bp, flags); + if (error) + xfs_buf_rele(bp); + else + *bpp = bp; + goto out_free_buf; } + rcu_read_unlock(); - /* No match found */ - if (new_bp) { - rb_link_node(&new_bp->b_rbnode, parent, rbp); - rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree); - /* the buffer keeps the perag reference until it is freed */ - new_bp->b_pag = pag; - spin_unlock(&pag->pag_buf_lock); - } else { - XFS_STATS_INC(xb_miss_locked); - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); - } - return new_bp; + *bpp = new_bp; + return 0; -found: - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); +out_free_buf: + xfs_buf_free(new_bp); +out_drop_pag: + if (pag) + xfs_perag_put(pag); + return error; +} - if (!xfs_buf_trylock(bp)) { - if (flags & XBF_TRYLOCK) { - xfs_buf_rele(bp); - XFS_STATS_INC(xb_busy_locked); - return NULL; - } - xfs_buf_lock(bp); - XFS_STATS_INC(xb_get_locked_waited); - } +static inline struct xfs_perag * +xfs_buftarg_get_pag( + struct xfs_buftarg *btp, + const struct xfs_buf_map *map) +{ + struct xfs_mount *mp = btp->bt_mount; - /* - * if the buffer is stale, clear all the external state associated with - * it. We need to keep flags such as how we allocated the buffer memory - * intact here. - */ - if (bp->b_flags & XBF_STALE) { - ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); - ASSERT(bp->b_iodone == NULL); - bp->b_flags &= _XBF_KMEM | _XBF_PAGES; - bp->b_ops = NULL; - } + if (xfs_buftarg_is_mem(btp)) + return NULL; + return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn)); +} - trace_xfs_buf_find(bp, flags, _RET_IP_); - XFS_STATS_INC(xb_get_locked); - return bp; +static inline struct xfs_buf_cache * +xfs_buftarg_buf_cache( + struct xfs_buftarg *btp, + struct xfs_perag *pag) +{ + if (pag) + return &pag->pag_bcache; + return btp->bt_cache; } /* @@ -603,107 +564,191 @@ found: * cache hits, as metadata intensive workloads will see 3 orders of magnitude * more hits than misses. 
*/ -struct xfs_buf * +int xfs_buf_get_map( - struct xfs_buftarg *target, + struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps, - xfs_buf_flags_t flags) + xfs_buf_flags_t flags, + struct xfs_buf **bpp) { - struct xfs_buf *bp; - struct xfs_buf *new_bp; - int error = 0; + struct xfs_buf_cache *bch; + struct xfs_perag *pag; + struct xfs_buf *bp = NULL; + struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn }; + int error; + int i; + + if (flags & XBF_LIVESCAN) + cmap.bm_flags |= XBM_LIVESCAN; + for (i = 0; i < nmaps; i++) + cmap.bm_len += map[i].bm_len; - bp = _xfs_buf_find(target, map, nmaps, flags, NULL); - if (likely(bp)) - goto found; + error = xfs_buf_map_verify(btp, &cmap); + if (error) + return error; - new_bp = _xfs_buf_alloc(target, map, nmaps, flags); - if (unlikely(!new_bp)) - return NULL; + pag = xfs_buftarg_get_pag(btp, &cmap); + bch = xfs_buftarg_buf_cache(btp, pag); - error = xfs_buf_allocate_memory(new_bp, flags); - if (error) { - xfs_buf_free(new_bp); - return NULL; - } + error = xfs_buf_lookup(bch, &cmap, flags, &bp); + if (error && error != -ENOENT) + goto out_put_perag; - bp = _xfs_buf_find(target, map, nmaps, flags, new_bp); - if (!bp) { - xfs_buf_free(new_bp); - return NULL; - } + /* cache hits always outnumber misses by at least 10:1 */ + if (unlikely(!bp)) { + XFS_STATS_INC(btp->bt_mount, xb_miss_locked); - if (bp != new_bp) - xfs_buf_free(new_bp); - -found: - if (!bp->b_addr) { - error = _xfs_buf_map_pages(bp, flags); - if (unlikely(error)) { - xfs_warn(target->bt_mount, - "%s: failed to map pages\n", __func__); - xfs_buf_relse(bp); - return NULL; - } + if (flags & XBF_INCORE) + goto out_put_perag; + + /* xfs_buf_find_insert() consumes the perag reference. */ + error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps, + flags, &bp); + if (error) + return error; + } else { + XFS_STATS_INC(btp->bt_mount, xb_get_locked); + if (pag) + xfs_perag_put(pag); } - XFS_STATS_INC(xb_get); + /* + * Clear b_error if this is a lookup from a caller that doesn't expect + * valid data to be found in the buffer. + */ + if (!(flags & XBF_READ)) + xfs_buf_ioerror(bp, 0); + + XFS_STATS_INC(btp->bt_mount, xb_get); trace_xfs_buf_get(bp, flags, _RET_IP_); - return bp; + *bpp = bp; + return 0; + +out_put_perag: + if (pag) + xfs_perag_put(pag); + return error; } -STATIC int +int _xfs_buf_read( - xfs_buf_t *bp, - xfs_buf_flags_t flags) + struct xfs_buf *bp) { - ASSERT(!(flags & XBF_WRITE)); ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL); - bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); - bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); + bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); + bp->b_flags |= XBF_READ; + xfs_buf_submit(bp); + return xfs_buf_iowait(bp); +} + +/* + * Reverify a buffer found in cache without an attached ->b_ops. + * + * If the caller passed an ops structure and the buffer doesn't have ops + * assigned, set the ops and use it to verify the contents. If verification + * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is + * already in XBF_DONE state on entry. + * + * Under normal operations, every in-core buffer is verified on read I/O + * completion. There are two scenarios that can lead to in-core buffers without + * an assigned ->b_ops. The first is during log recovery of buffers on a V4 + * filesystem, though these buffers are purged at the end of recovery. 
The + * other is online repair, which intentionally reads with a NULL buffer ops to + * run several verifiers across an in-core buffer in order to establish buffer + * type. If repair can't establish that, the buffer will be left in memory + * with NULL buffer ops. + */ +int +xfs_buf_reverify( + struct xfs_buf *bp, + const struct xfs_buf_ops *ops) +{ + ASSERT(bp->b_flags & XBF_DONE); + ASSERT(bp->b_error == 0); - xfs_buf_iorequest(bp); - if (flags & XBF_ASYNC) + if (!ops || bp->b_ops) return 0; - return xfs_buf_iowait(bp); + + bp->b_ops = ops; + bp->b_ops->verify_read(bp); + if (bp->b_error) + bp->b_flags &= ~XBF_DONE; + return bp->b_error; } -xfs_buf_t * +int xfs_buf_read_map( struct xfs_buftarg *target, struct xfs_buf_map *map, int nmaps, xfs_buf_flags_t flags, - const struct xfs_buf_ops *ops) + struct xfs_buf **bpp, + const struct xfs_buf_ops *ops, + xfs_failaddr_t fa) { struct xfs_buf *bp; + int error; + + ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD))); flags |= XBF_READ; + *bpp = NULL; - bp = xfs_buf_get_map(target, map, nmaps, flags); - if (bp) { - trace_xfs_buf_read(bp, flags, _RET_IP_); - - if (!XFS_BUF_ISDONE(bp)) { - XFS_STATS_INC(xb_get_read); - bp->b_ops = ops; - _xfs_buf_read(bp, flags); - } else if (flags & XBF_ASYNC) { - /* - * Read ahead call which is already satisfied, - * drop the buffer - */ - xfs_buf_relse(bp); - return NULL; - } else { - /* We do not want read in the flags */ - bp->b_flags &= ~XBF_READ; - } + error = xfs_buf_get_map(target, map, nmaps, flags, &bp); + if (error) + return error; + + trace_xfs_buf_read(bp, flags, _RET_IP_); + + if (!(bp->b_flags & XBF_DONE)) { + /* Initiate the buffer read and wait. */ + XFS_STATS_INC(target->bt_mount, xb_get_read); + bp->b_ops = ops; + error = _xfs_buf_read(bp); + } else { + /* Buffer already read; all we need to do is check it. */ + error = xfs_buf_reverify(bp, ops); + + /* We do not want read in the flags */ + bp->b_flags &= ~XBF_READ; + ASSERT(bp->b_ops != NULL || ops == NULL); } - return bp; + /* + * If we've had a read error, then the contents of the buffer are + * invalid and should not be used. To ensure that a followup read tries + * to pull the buffer from disk again, we clear the XBF_DONE flag and + * mark the buffer stale. This ensures that anyone who has a current + * reference to the buffer will interpret it's contents correctly and + * future cache lookups will also treat it as an empty, uninitialised + * buffer. + */ + if (error) { + /* + * Check against log shutdown for error reporting because + * metadata writeback may require a read first and we need to + * report errors in metadata writeback until the log is shut + * down. High level transaction read functions already check + * against mount shutdown, anyway, so we only need to be + * concerned about low level IO interactions here. + */ + if (!xlog_is_shutdown(target->bt_mount->m_log)) + xfs_buf_ioerror_alert(bp, fa); + + bp->b_flags &= ~XBF_DONE; + xfs_buf_stale(bp); + xfs_buf_relse(bp); + + /* bad CRC means corrupted metadata */ + if (error == -EFSBADCRC) + error = -EFSCORRUPTED; + return error; + } + + *bpp = bp; + return 0; } /* @@ -717,166 +762,87 @@ xfs_buf_readahead_map( int nmaps, const struct xfs_buf_ops *ops) { - if (bdi_read_congested(target->bt_bdi)) + const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD; + struct xfs_buf *bp; + + /* + * Currently we don't have a good means or justification for performing + * xmbuf_map_page asynchronously, so we don't do readahead. 
+ */ + if (xfs_buftarg_is_mem(target)) return; - xfs_buf_read_map(target, map, nmaps, - XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops); + if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp)) + return; + trace_xfs_buf_readahead(bp, 0, _RET_IP_); + + if (bp->b_flags & XBF_DONE) { + xfs_buf_reverify(bp, ops); + xfs_buf_relse(bp); + return; + } + XFS_STATS_INC(target->bt_mount, xb_get_read); + bp->b_ops = ops; + bp->b_flags &= ~(XBF_WRITE | XBF_DONE); + bp->b_flags |= flags; + percpu_counter_inc(&target->bt_readahead_count); + xfs_buf_submit(bp); } /* * Read an uncached buffer from disk. Allocates and returns a locked - * buffer containing the disk contents or nothing. + * buffer containing the disk contents or nothing. Uncached buffers always have + * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer + * is cached or uncached during fault diagnosis. */ -struct xfs_buf * +int xfs_buf_read_uncached( struct xfs_buftarg *target, xfs_daddr_t daddr, size_t numblks, - int flags, + struct xfs_buf **bpp, const struct xfs_buf_ops *ops) { struct xfs_buf *bp; + int error; - bp = xfs_buf_get_uncached(target, numblks, flags); - if (!bp) - return NULL; + *bpp = NULL; + + error = xfs_buf_get_uncached(target, numblks, &bp); + if (error) + return error; /* set up the buffer for a read IO */ ASSERT(bp->b_map_count == 1); - bp->b_bn = daddr; + bp->b_rhash_key = XFS_BUF_DADDR_NULL; bp->b_maps[0].bm_bn = daddr; bp->b_flags |= XBF_READ; bp->b_ops = ops; - xfsbdstrat(target->bt_mount, bp); - xfs_buf_iowait(bp); - return bp; -} - -/* - * Return a buffer allocated as an empty buffer and associated to external - * memory via xfs_buf_associate_memory() back to it's empty state. - */ -void -xfs_buf_set_empty( - struct xfs_buf *bp, - size_t numblks) -{ - if (bp->b_pages) - _xfs_buf_free_pages(bp); - - bp->b_pages = NULL; - bp->b_page_count = 0; - bp->b_addr = NULL; - bp->b_length = numblks; - bp->b_io_length = numblks; - - ASSERT(bp->b_map_count == 1); - bp->b_bn = XFS_BUF_DADDR_NULL; - bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL; - bp->b_maps[0].bm_len = bp->b_length; -} - -static inline struct page * -mem_to_page( - void *addr) -{ - if ((!is_vmalloc_addr(addr))) { - return virt_to_page(addr); - } else { - return vmalloc_to_page(addr); - } -} - -int -xfs_buf_associate_memory( - xfs_buf_t *bp, - void *mem, - size_t len) -{ - int rval; - int i = 0; - unsigned long pageaddr; - unsigned long offset; - size_t buflen; - int page_count; - - pageaddr = (unsigned long)mem & PAGE_MASK; - offset = (unsigned long)mem - pageaddr; - buflen = PAGE_ALIGN(len + offset); - page_count = buflen >> PAGE_SHIFT; - - /* Free any previous set of page pointers */ - if (bp->b_pages) - _xfs_buf_free_pages(bp); - - bp->b_pages = NULL; - bp->b_addr = mem; - - rval = _xfs_buf_get_pages(bp, page_count, 0); - if (rval) - return rval; - - bp->b_offset = offset; - - for (i = 0; i < bp->b_page_count; i++) { - bp->b_pages[i] = mem_to_page((void *)pageaddr); - pageaddr += PAGE_SIZE; + xfs_buf_submit(bp); + error = xfs_buf_iowait(bp); + if (error) { + xfs_buf_relse(bp); + return error; } - bp->b_io_length = BTOBB(len); - bp->b_length = BTOBB(buflen); - + *bpp = bp; return 0; } -xfs_buf_t * +int xfs_buf_get_uncached( struct xfs_buftarg *target, size_t numblks, - int flags) + struct xfs_buf **bpp) { - unsigned long page_count; - int error, i; - struct xfs_buf *bp; + int error; DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); - bp = _xfs_buf_alloc(target, &map, 1, 0); - if (unlikely(bp == NULL)) - goto fail; - - 
page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT; - error = _xfs_buf_get_pages(bp, page_count, 0); - if (error) - goto fail_free_buf; - - for (i = 0; i < page_count; i++) { - bp->b_pages[i] = alloc_page(xb_to_gfp(flags)); - if (!bp->b_pages[i]) - goto fail_free_mem; - } - bp->b_flags |= _XBF_PAGES; - - error = _xfs_buf_map_pages(bp, 0); - if (unlikely(error)) { - xfs_warn(target->bt_mount, - "%s: failed to map pages\n", __func__); - goto fail_free_mem; - } - - trace_xfs_buf_get_uncached(bp, _RET_IP_); - return bp; - - fail_free_mem: - while (--i >= 0) - __free_page(bp->b_pages[i]); - _xfs_buf_free_pages(bp); - fail_free_buf: - xfs_buf_free_maps(bp); - kmem_zone_free(xfs_buf_zone, bp); - fail: - return NULL; + error = xfs_buf_alloc(target, &map, 1, 0, bpp); + if (!error) + trace_xfs_buf_get_uncached(*bpp, _RET_IP_); + return error; } /* @@ -886,51 +852,101 @@ xfs_buf_get_uncached( */ void xfs_buf_hold( - xfs_buf_t *bp) + struct xfs_buf *bp) { trace_xfs_buf_hold(bp, _RET_IP_); - atomic_inc(&bp->b_hold); + + spin_lock(&bp->b_lock); + bp->b_hold++; + spin_unlock(&bp->b_lock); } -/* - * Releases a hold on the specified buffer. If the - * the hold count is 1, calls xfs_buf_free. - */ -void -xfs_buf_rele( - xfs_buf_t *bp) +static void +xfs_buf_rele_uncached( + struct xfs_buf *bp) +{ + ASSERT(list_empty(&bp->b_lru)); + + spin_lock(&bp->b_lock); + if (--bp->b_hold) { + spin_unlock(&bp->b_lock); + return; + } + spin_unlock(&bp->b_lock); + xfs_buf_free(bp); +} + +static void +xfs_buf_rele_cached( + struct xfs_buf *bp) { + struct xfs_buftarg *btp = bp->b_target; struct xfs_perag *pag = bp->b_pag; + struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag); + bool freebuf = false; trace_xfs_buf_rele(bp, _RET_IP_); - if (!pag) { - ASSERT(list_empty(&bp->b_lru)); - ASSERT(RB_EMPTY_NODE(&bp->b_rbnode)); - if (atomic_dec_and_test(&bp->b_hold)) - xfs_buf_free(bp); - return; + spin_lock(&bp->b_lock); + ASSERT(bp->b_hold >= 1); + if (bp->b_hold > 1) { + bp->b_hold--; + goto out_unlock; } - ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode)); - - ASSERT(atomic_read(&bp->b_hold) > 0); - if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) { - if (!(bp->b_flags & XBF_STALE) && - atomic_read(&bp->b_lru_ref)) { - xfs_buf_lru_add(bp); - spin_unlock(&pag->pag_buf_lock); + /* we are asked to drop the last reference */ + if (atomic_read(&bp->b_lru_ref)) { + /* + * If the buffer is added to the LRU, keep the reference to the + * buffer for the LRU and clear the (now stale) dispose list + * state flag, else drop the reference. + */ + if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) + bp->b_state &= ~XFS_BSTATE_DISPOSE; + else + bp->b_hold--; + } else { + bp->b_hold--; + /* + * most of the time buffers will already be removed from the + * LRU, so optimise that case by checking for the + * XFS_BSTATE_DISPOSE flag indicating the last list the buffer + * was on was the disposal list + */ + if (!(bp->b_state & XFS_BSTATE_DISPOSE)) { + list_lru_del_obj(&btp->bt_lru, &bp->b_lru); } else { - xfs_buf_lru_del(bp); - ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); - rb_erase(&bp->b_rbnode, &pag->pag_buf_tree); - spin_unlock(&pag->pag_buf_lock); - xfs_perag_put(pag); - xfs_buf_free(bp); + ASSERT(list_empty(&bp->b_lru)); } + + ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); + rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head, + xfs_buf_hash_params); + if (pag) + xfs_perag_put(pag); + freebuf = true; } + +out_unlock: + spin_unlock(&bp->b_lock); + + if (freebuf) + xfs_buf_free(bp); } +/* + * Release a hold on the specified buffer. 
+ */ +void +xfs_buf_rele( + struct xfs_buf *bp) +{ + trace_xfs_buf_rele(bp, _RET_IP_); + if (xfs_buf_is_uncached(bp)) + xfs_buf_rele_uncached(bp); + else + xfs_buf_rele_cached(bp); +} /* * Lock a buffer object, if it is not already locked. @@ -951,9 +967,9 @@ xfs_buf_trylock( locked = down_trylock(&bp->b_sema) == 0; if (locked) - XB_SET_OWNER(bp); - - trace_xfs_buf_trylock(bp, _RET_IP_); + trace_xfs_buf_trylock(bp, _RET_IP_); + else + trace_xfs_buf_trylock_fail(bp, _RET_IP_); return locked; } @@ -973,9 +989,8 @@ xfs_buf_lock( trace_xfs_buf_lock(bp, _RET_IP_); if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE)) - xfs_log_force(bp->b_target->bt_mount, 0); + xfs_log_force(bp->b_mount, 0); down(&bp->b_sema); - XB_SET_OWNER(bp); trace_xfs_buf_lock_done(bp, _RET_IP_); } @@ -984,15 +999,15 @@ void xfs_buf_unlock( struct xfs_buf *bp) { - XB_CLEAR_OWNER(bp); - up(&bp->b_sema); + ASSERT(xfs_buf_islocked(bp)); + up(&bp->b_sema); trace_xfs_buf_unlock(bp, _RET_IP_); } STATIC void xfs_buf_wait_unpin( - xfs_buf_t *bp) + struct xfs_buf *bp) { DECLARE_WAITQUEUE (wait, current); @@ -1010,486 +1025,472 @@ xfs_buf_wait_unpin( set_current_state(TASK_RUNNING); } -/* - * Buffer Utility Routines - */ - -STATIC void -xfs_buf_iodone_work( - struct work_struct *work) +static void +xfs_buf_ioerror_alert_ratelimited( + struct xfs_buf *bp) { - struct xfs_buf *bp = - container_of(work, xfs_buf_t, b_iodone_work); - bool read = !!(bp->b_flags & XBF_READ); - - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); - - /* only validate buffers that were read without errors */ - if (read && bp->b_ops && !bp->b_error && (bp->b_flags & XBF_DONE)) - bp->b_ops->verify_read(bp); + static unsigned long lasttime; + static struct xfs_buftarg *lasttarg; - if (bp->b_iodone) - (*(bp->b_iodone))(bp); - else if (bp->b_flags & XBF_ASYNC) - xfs_buf_relse(bp); - else { - ASSERT(read && bp->b_ops); - complete(&bp->b_iowait); + if (bp->b_target != lasttarg || + time_after(jiffies, (lasttime + 5*HZ))) { + lasttime = jiffies; + xfs_buf_ioerror_alert(bp, __this_address); } + lasttarg = bp->b_target; } -void -xfs_buf_ioend( - struct xfs_buf *bp, - int schedule) +/* + * Account for this latest trip around the retry handler, and decide if + * we've failed enough times to constitute a permanent failure. 
+ */ +static bool +xfs_buf_ioerror_permanent( + struct xfs_buf *bp, + struct xfs_error_cfg *cfg) { - bool read = !!(bp->b_flags & XBF_READ); - - trace_xfs_buf_iodone(bp, _RET_IP_); - - if (bp->b_error == 0) - bp->b_flags |= XBF_DONE; + struct xfs_mount *mp = bp->b_mount; - if (bp->b_iodone || (read && bp->b_ops) || (bp->b_flags & XBF_ASYNC)) { - if (schedule) { - INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work); - queue_work(xfslogd_workqueue, &bp->b_iodone_work); - } else { - xfs_buf_iodone_work(&bp->b_iodone_work); - } - } else { - bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD); - complete(&bp->b_iowait); - } -} + if (cfg->max_retries != XFS_ERR_RETRY_FOREVER && + ++bp->b_retries > cfg->max_retries) + return true; + if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && + time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) + return true; -void -xfs_buf_ioerror( - xfs_buf_t *bp, - int error) -{ - ASSERT(error >= 0 && error <= 0xffff); - bp->b_error = (unsigned short)error; - trace_xfs_buf_ioerror(bp, error, _RET_IP_); -} + /* At unmount we may treat errors differently */ + if (xfs_is_unmounting(mp) && mp->m_fail_unmount) + return true; -void -xfs_buf_ioerror_alert( - struct xfs_buf *bp, - const char *func) -{ - xfs_alert(bp->b_target->bt_mount, -"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d", - (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length); + return false; } /* - * Called when we want to stop a buffer from getting written or read. - * We attach the EIO error, muck with its flags, and call xfs_buf_ioend - * so that the proper iodone callbacks get called. + * On a sync write or shutdown we just want to stale the buffer and let the + * caller handle the error in bp->b_error appropriately. + * + * If the write was asynchronous then no one will be looking for the error. If + * this is the first failure of this type, clear the error state and write the + * buffer out again. This means we always retry an async write failure at least + * once, but we also need to set the buffer up to behave correctly now for + * repeated failures. + * + * If we get repeated async write failures, then we take action according to the + * error configuration we have been set up to use. + * + * Returns true if this function took care of error handling and the caller must + * not touch the buffer again. Return false if the caller should proceed with + * normal I/O completion handling. */ -STATIC int -xfs_bioerror( - xfs_buf_t *bp) +static bool +xfs_buf_ioend_handle_error( + struct xfs_buf *bp) { -#ifdef XFSERRORDEBUG - ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone); -#endif + struct xfs_mount *mp = bp->b_mount; + struct xfs_error_cfg *cfg; + struct xfs_log_item *lip; /* - * No need to wait until the buffer is unpinned, we aren't flushing it. + * If we've already shutdown the journal because of I/O errors, there's + * no point in giving this a retry. */ - xfs_buf_ioerror(bp, EIO); + if (xlog_is_shutdown(mp->m_log)) + goto out_stale; + + xfs_buf_ioerror_alert_ratelimited(bp); /* - * We're calling xfs_buf_ioend, so delete XBF_DONE flag. + * We're not going to bother about retrying this during recovery. + * One strike! */ - XFS_BUF_UNREAD(bp); - XFS_BUF_UNDONE(bp); - xfs_buf_stale(bp); - - xfs_buf_ioend(bp, 0); + if (bp->b_flags & _XBF_LOGRECOVERY) { + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + return false; + } - return EIO; -} + /* + * Synchronous writes will have callers process the error. 
+ */ + if (!(bp->b_flags & XBF_ASYNC)) + goto out_stale; + + trace_xfs_buf_iodone_async(bp, _RET_IP_); + + cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); + if (bp->b_last_error != bp->b_error || + !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) { + bp->b_last_error = bp->b_error; + if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && + !bp->b_first_retry_time) + bp->b_first_retry_time = jiffies; + goto resubmit; + } -/* - * Same as xfs_bioerror, except that we are releasing the buffer - * here ourselves, and avoiding the xfs_buf_ioend call. - * This is meant for userdata errors; metadata bufs come with - * iodone functions attached, so that we can track down errors. - */ -STATIC int -xfs_bioerror_relse( - struct xfs_buf *bp) -{ - int64_t fl = bp->b_flags; /* - * No need to wait until the buffer is unpinned. - * We aren't flushing it. - * - * chunkhold expects B_DONE to be set, whether - * we actually finish the I/O or not. We don't want to - * change that interface. + * Permanent error - we need to trigger a shutdown if we haven't already + * to indicate that inconsistency will result from this action. */ - XFS_BUF_UNREAD(bp); - XFS_BUF_DONE(bp); - xfs_buf_stale(bp); - bp->b_iodone = NULL; - if (!(fl & XBF_ASYNC)) { - /* - * Mark b_error and B_ERROR _both_. - * Lot's of chunkcache code assumes that. - * There's no reason to mark error for - * ASYNC buffers. - */ - xfs_buf_ioerror(bp, EIO); - complete(&bp->b_iowait); - } else { - xfs_buf_relse(bp); + if (xfs_buf_ioerror_permanent(bp, cfg)) { + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + goto out_stale; } - return EIO; + /* Still considered a transient error. Caller will schedule retries. */ + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { + set_bit(XFS_LI_FAILED, &lip->li_flags); + clear_bit(XFS_LI_FLUSHING, &lip->li_flags); + } + + xfs_buf_ioerror(bp, 0); + xfs_buf_relse(bp); + return true; + +resubmit: + xfs_buf_ioerror(bp, 0); + bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL); + reinit_completion(&bp->b_iowait); + xfs_buf_submit(bp); + return true; +out_stale: + xfs_buf_stale(bp); + bp->b_flags |= XBF_DONE; + bp->b_flags &= ~XBF_WRITE; + trace_xfs_buf_error_relse(bp, _RET_IP_); + return false; } -STATIC int -xfs_bdstrat_cb( +/* returns false if the caller needs to resubmit the I/O, else true */ +static bool +__xfs_buf_ioend( struct xfs_buf *bp) { - if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { - trace_xfs_bdstrat_shut(bp, _RET_IP_); + trace_xfs_buf_iodone(bp, _RET_IP_); + + if (bp->b_flags & XBF_READ) { + if (!bp->b_error && is_vmalloc_addr(bp->b_addr)) + invalidate_kernel_vmap_range(bp->b_addr, + roundup(BBTOB(bp->b_length), PAGE_SIZE)); + if (!bp->b_error && bp->b_ops) + bp->b_ops->verify_read(bp); + if (!bp->b_error) + bp->b_flags |= XBF_DONE; + if (bp->b_flags & XBF_READ_AHEAD) + percpu_counter_dec(&bp->b_target->bt_readahead_count); + } else { + if (!bp->b_error) { + bp->b_flags &= ~XBF_WRITE_FAIL; + bp->b_flags |= XBF_DONE; + } + + if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp)) + return false; + + /* clear the retry state */ + bp->b_last_error = 0; + bp->b_retries = 0; + bp->b_first_retry_time = 0; + /* - * Metadata write that didn't get logged but - * written delayed anyway. These aren't associated - * with a transaction, and can be ignored. + * Note that for things like remote attribute buffers, there may + * not be a buffer log item here, so processing the buffer log + * item must remain optional. 
*/ - if (!bp->b_iodone && !XFS_BUF_ISREAD(bp)) - return xfs_bioerror_relse(bp); - else - return xfs_bioerror(bp); + if (bp->b_log_item) + xfs_buf_item_done(bp); + + if (bp->b_iodone) + bp->b_iodone(bp); } - xfs_buf_iorequest(bp); - return 0; + bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | + _XBF_LOGRECOVERY); + return true; } -int -xfs_bwrite( - struct xfs_buf *bp) +static void +xfs_buf_ioend( + struct xfs_buf *bp) { - int error; - - ASSERT(xfs_buf_islocked(bp)); - - bp->b_flags |= XBF_WRITE; - bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q); + if (!__xfs_buf_ioend(bp)) + return; + if (bp->b_flags & XBF_ASYNC) + xfs_buf_relse(bp); + else + complete(&bp->b_iowait); +} - xfs_bdstrat_cb(bp); +static void +xfs_buf_ioend_work( + struct work_struct *work) +{ + struct xfs_buf *bp = + container_of(work, struct xfs_buf, b_ioend_work); - error = xfs_buf_iowait(bp); - if (error) { - xfs_force_shutdown(bp->b_target->bt_mount, - SHUTDOWN_META_IO_ERROR); - } - return error; + if (__xfs_buf_ioend(bp)) + xfs_buf_relse(bp); } -/* - * Wrapper around bdstrat so that we can stop data from going to disk in case - * we are shutting down the filesystem. Typically user data goes thru this - * path; one of the exceptions is the superblock. - */ void -xfsbdstrat( - struct xfs_mount *mp, - struct xfs_buf *bp) +__xfs_buf_ioerror( + struct xfs_buf *bp, + int error, + xfs_failaddr_t failaddr) { - if (XFS_FORCED_SHUTDOWN(mp)) { - trace_xfs_bdstrat_shut(bp, _RET_IP_); - xfs_bioerror_relse(bp); - return; - } + ASSERT(error <= 0 && error >= -1000); + bp->b_error = error; + trace_xfs_buf_ioerror(bp, error, failaddr); +} - xfs_buf_iorequest(bp); +void +xfs_buf_ioerror_alert( + struct xfs_buf *bp, + xfs_failaddr_t func) +{ + xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error", + "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d", + func, (uint64_t)xfs_buf_daddr(bp), + bp->b_length, -bp->b_error); } -STATIC void -_xfs_buf_ioend( - xfs_buf_t *bp, - int schedule) +/* + * To simulate an I/O failure, the buffer must be locked and held with at least + * three references. The LRU reference is dropped by the stale call. The buf + * item reference is dropped via ioend processing. The third reference is owned + * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC. + */ +void +xfs_buf_ioend_fail( + struct xfs_buf *bp) { - if (atomic_dec_and_test(&bp->b_io_remaining) == 1) - xfs_buf_ioend(bp, schedule); + bp->b_flags &= ~XBF_DONE; + xfs_buf_stale(bp); + xfs_buf_ioerror(bp, -EIO); + xfs_buf_ioend(bp); } -STATIC void -xfs_buf_bio_end_io( - struct bio *bio, - int error) +int +xfs_bwrite( + struct xfs_buf *bp) { - xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; + int error; - /* - * don't overwrite existing errors - otherwise we can lose errors on - * buffers that require multiple bios to complete. 
- */ - if (!bp->b_error) - xfs_buf_ioerror(bp, -error); + ASSERT(xfs_buf_islocked(bp)); - if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) - invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); + bp->b_flags |= XBF_WRITE; + bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | + XBF_DONE); - _xfs_buf_ioend(bp, 1); - bio_put(bio); + xfs_buf_submit(bp); + error = xfs_buf_iowait(bp); + if (error) + xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); + return error; } static void -xfs_buf_ioapply_map( - struct xfs_buf *bp, - int map, - int *buf_offset, - int *count, - int rw) -{ - int page_index; - int total_nr_pages = bp->b_page_count; - int nr_pages; - struct bio *bio; - sector_t sector = bp->b_maps[map].bm_bn; - int size; - int offset; - - total_nr_pages = bp->b_page_count; - - /* skip the pages in the buffer before the start offset */ - page_index = 0; - offset = *buf_offset; - while (offset >= PAGE_SIZE) { - page_index++; - offset -= PAGE_SIZE; - } - - /* - * Limit the IO size to the length of the current vector, and update the - * remaining IO count for the next time around. - */ - size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); - *count -= size; - *buf_offset += size; - -next_chunk: - atomic_inc(&bp->b_io_remaining); - nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); - if (nr_pages > total_nr_pages) - nr_pages = total_nr_pages; - - bio = bio_alloc(GFP_NOIO, nr_pages); - bio->bi_bdev = bp->b_target->bt_bdev; - bio->bi_sector = sector; - bio->bi_end_io = xfs_buf_bio_end_io; - bio->bi_private = bp; - - - for (; size && nr_pages; nr_pages--, page_index++) { - int rbytes, nbytes = PAGE_SIZE - offset; - - if (nbytes > size) - nbytes = size; - - rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, - offset); - if (rbytes < nbytes) - break; +xfs_buf_bio_end_io( + struct bio *bio) +{ + struct xfs_buf *bp = bio->bi_private; - offset = 0; - sector += BTOBB(nbytes); - size -= nbytes; - total_nr_pages--; - } + if (bio->bi_status) + xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status)); + else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && + XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) + xfs_buf_ioerror(bp, -EIO); - if (likely(bio->bi_size)) { - if (xfs_buf_is_vmapped(bp)) { - flush_kernel_vmap_range(bp->b_addr, - xfs_buf_vmap_len(bp)); - } - submit_bio(rw, bio); - if (size) - goto next_chunk; + if (bp->b_flags & XBF_ASYNC) { + INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work); + queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work); } else { - /* - * This is guaranteed not to be the last io reference count - * because the caller (xfs_buf_iorequest) holds a count itself. - */ - atomic_dec(&bp->b_io_remaining); - xfs_buf_ioerror(bp, EIO); - bio_put(bio); + complete(&bp->b_iowait); } + bio_put(bio); } -STATIC void -_xfs_buf_ioapply( - struct xfs_buf *bp) +static inline blk_opf_t +xfs_buf_bio_op( + struct xfs_buf *bp) { - struct blk_plug plug; - int rw; - int offset; - int size; - int i; - - /* - * Make sure we capture only current IO errors rather than stale errors - * left over from previous use of the buffer (e.g. failed readahead). - */ - bp->b_error = 0; + blk_opf_t op; if (bp->b_flags & XBF_WRITE) { - if (bp->b_flags & XBF_SYNCIO) - rw = WRITE_SYNC; - else - rw = WRITE; - if (bp->b_flags & XBF_FUA) - rw |= REQ_FUA; - if (bp->b_flags & XBF_FLUSH) - rw |= REQ_FLUSH; - - /* - * Run the write verifier callback function if it exists. 
If - * this function fails it will mark the buffer with an error and - * the IO should not be dispatched. - */ - if (bp->b_ops) { - bp->b_ops->verify_write(bp); - if (bp->b_error) { - xfs_force_shutdown(bp->b_target->bt_mount, - SHUTDOWN_CORRUPT_INCORE); - return; - } - } - } else if (bp->b_flags & XBF_READ_AHEAD) { - rw = READA; + op = REQ_OP_WRITE; } else { - rw = READ; + op = REQ_OP_READ; + if (bp->b_flags & XBF_READ_AHEAD) + op |= REQ_RAHEAD; } - /* we only use the buffer cache for meta-data */ - rw |= REQ_META; + return op | REQ_META; +} + +static void +xfs_buf_submit_bio( + struct xfs_buf *bp) +{ + unsigned int len = BBTOB(bp->b_length); + unsigned int nr_vecs = bio_add_max_vecs(bp->b_addr, len); + unsigned int map = 0; + struct blk_plug plug; + struct bio *bio; + + bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp), + GFP_NOIO); + if (is_vmalloc_addr(bp->b_addr)) + bio_add_vmalloc(bio, bp->b_addr, len); + else + bio_add_virt_nofail(bio, bp->b_addr, len); + bio->bi_private = bp; + bio->bi_end_io = xfs_buf_bio_end_io; /* - * Walk all the vectors issuing IO on them. Set up the initial offset - * into the buffer and the desired IO size before we start - - * _xfs_buf_ioapply_vec() will modify them appropriately for each - * subsequent call. + * If there is more than one map segment, split out a new bio for each + * map except of the last one. The last map is handled by the + * remainder of the original bio outside the loop. */ - offset = bp->b_offset; - size = BBTOB(bp->b_io_length); blk_start_plug(&plug); - for (i = 0; i < bp->b_map_count; i++) { - xfs_buf_ioapply_map(bp, i, &offset, &size, rw); - if (bp->b_error) - break; - if (size <= 0) - break; /* all done */ + for (map = 0; map < bp->b_map_count - 1; map++) { + struct bio *split; + + split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS, + &fs_bio_set); + split->bi_iter.bi_sector = bp->b_maps[map].bm_bn; + bio_chain(split, bio); + submit_bio(split); } + bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn; + submit_bio(bio); blk_finish_plug(&plug); } -void -xfs_buf_iorequest( - xfs_buf_t *bp) +/* + * Wait for I/O completion of a sync buffer and return the I/O error code. + */ +static int +xfs_buf_iowait( + struct xfs_buf *bp) { - trace_xfs_buf_iorequest(bp, _RET_IP_); + ASSERT(!(bp->b_flags & XBF_ASYNC)); - ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); + do { + trace_xfs_buf_iowait(bp, _RET_IP_); + wait_for_completion(&bp->b_iowait); + trace_xfs_buf_iowait_done(bp, _RET_IP_); + } while (!__xfs_buf_ioend(bp)); - if (bp->b_flags & XBF_WRITE) - xfs_buf_wait_unpin(bp); - xfs_buf_hold(bp); + return bp->b_error; +} - /* Set the count to 1 initially, this will stop an I/O - * completion callout which happens before we have started - * all the I/O from calling xfs_buf_ioend too early. - */ - atomic_set(&bp->b_io_remaining, 1); - _xfs_buf_ioapply(bp); - _xfs_buf_ioend(bp, 1); +/* + * Run the write verifier callback function if it exists. If this fails, mark + * the buffer with an error and do not dispatch the I/O. + */ +static bool +xfs_buf_verify_write( + struct xfs_buf *bp) +{ + if (bp->b_ops) { + bp->b_ops->verify_write(bp); + if (bp->b_error) + return false; + } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { + /* + * Non-crc filesystems don't attach verifiers during log + * recovery, so don't warn for such filesystems. 

-	/* Set the count to 1 initially, this will stop an I/O
-	 * completion callout which happens before we have started
-	 * all the I/O from calling xfs_buf_ioend too early.
-	 */
-	atomic_set(&bp->b_io_remaining, 1);
-	_xfs_buf_ioapply(bp);
-	_xfs_buf_ioend(bp, 1);
+/*
+ * Run the write verifier callback function if it exists. If this fails, mark
+ * the buffer with an error and do not dispatch the I/O.
+ */
+static bool
+xfs_buf_verify_write(
+	struct xfs_buf	*bp)
+{
+	if (bp->b_ops) {
+		bp->b_ops->verify_write(bp);
+		if (bp->b_error)
+			return false;
+	} else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
+		/*
+		 * Non-crc filesystems don't attach verifiers during log
+		 * recovery, so don't warn for such filesystems.
+		 */
+		if (xfs_has_crc(bp->b_mount)) {
+			xfs_warn(bp->b_mount,
+				"%s: no buf ops on daddr 0x%llx len %d",
+				__func__, xfs_buf_daddr(bp),
+				bp->b_length);
+			xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
+			dump_stack();
+		}
+	}

-	xfs_buf_rele(bp);
+	return true;
 }

 /*
- * Waits for I/O to complete on the buffer supplied. It returns immediately if
- * no I/O is pending or there is already a pending error on the buffer. It
- * returns the I/O error code, if any, or 0 if there was no error.
+ * Buffer I/O submission path, read or write. Asynchronous submission transfers
+ * the buffer lock ownership and the current reference to the IO. It is not
+ * safe to reference the buffer after a call to this function unless the caller
+ * holds an additional reference itself.
  */
-int
-xfs_buf_iowait(
-	xfs_buf_t		*bp)
+static void
+xfs_buf_submit(
+	struct xfs_buf	*bp)
 {
-	trace_xfs_buf_iowait(bp, _RET_IP_);
+	trace_xfs_buf_submit(bp, _RET_IP_);

-	if (!bp->b_error)
-		wait_for_completion(&bp->b_iowait);
+	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));

-	trace_xfs_buf_iowait_done(bp, _RET_IP_);
-	return bp->b_error;
-}
+	/*
+	 * On log shutdown we stale and complete the buffer immediately. We can
+	 * be called to read the superblock before the log has been set up, so
+	 * be careful checking the log state.
+	 *
+	 * Checking the mount shutdown state here can result in the log tail
+	 * moving inappropriately on disk as the log may not yet be shut down.
+	 * i.e. failing this buffer on mount shutdown can remove it from the AIL
+	 * and move the tail of the log forwards without having written this
+	 * buffer to disk. This corrupts the log tail state in memory, and
+	 * because the log may not be shut down yet, it can then be propagated
+	 * to disk before the log is shutdown. Hence we check log shutdown
+	 * state here rather than mount state to avoid corrupting the log tail
+	 * on shutdown.
+	 */
+	if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
+		xfs_buf_ioend_fail(bp);
+		return;
+	}

-xfs_caddr_t
-xfs_buf_offset(
-	xfs_buf_t		*bp,
-	size_t			offset)
-{
-	struct page		*page;
+	if (bp->b_flags & XBF_WRITE)
+		xfs_buf_wait_unpin(bp);

-	if (bp->b_addr)
-		return bp->b_addr + offset;
+	/*
+	 * Make sure we capture only current IO errors rather than stale errors
+	 * left over from previous use of the buffer (e.g. failed readahead).
+	 */
+	bp->b_error = 0;
+
+	if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
+		xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
+		xfs_buf_ioend(bp);
+		return;
+	}
+
+	/* In-memory targets are directly mapped, no I/O required. */
+	if (xfs_buftarg_is_mem(bp->b_target)) {
+		xfs_buf_ioend(bp);
+		return;
+	}

-	offset += bp->b_offset;
-	page = bp->b_pages[offset >> PAGE_SHIFT];
-	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
+	xfs_buf_submit_bio(bp);
 }
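A write verifier communicates failure by tagging the buffer, not via a return value: xfs_buf_verify_write() only checks bp->b_error afterwards, and xfs_buf_submit() turns that into a SHUTDOWN_CORRUPT_INCORE. As a hedged sketch of how a verifier is wired up through b_ops (the structure, magic constant, and header layout here are assumptions for illustration; real verifiers live alongside each on-disk structure):

	/* Hypothetical verifier: xfs_verifier_error() records bp->b_error. */
	static void
	xfs_foo_verify(
		struct xfs_buf	*bp)
	{
		struct xfs_foo_hdr *hdr = bp->b_addr;	/* assumed layout */

		if (hdr->foo_magic != cpu_to_be32(XFS_FOO_MAGIC)) /* assumed */
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
	}

	const struct xfs_buf_ops xfs_foo_buf_ops = {
		.name		= "xfs_foo",
		.verify_read	= xfs_foo_verify,
		.verify_write	= xfs_foo_verify,
	};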

 /*
- * Move data into or out of a buffer.
+ * Log a message about and stale a buffer that a caller has decided is corrupt.
+ *
+ * This function should be called for the kinds of metadata corruption that
+ * cannot be detected by a verifier, such as incorrect inter-block relationship
+ * data. Do /not/ call this function from a verifier function.
+ *
+ * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
+ * be marked stale, but b_error will not be set. The caller is responsible for
+ * releasing the buffer or fixing it.
  */
 void
-xfs_buf_iomove(
-	xfs_buf_t		*bp,	/* buffer to process		*/
-	size_t			boff,	/* starting buffer offset	*/
-	size_t			bsize,	/* length to copy		*/
-	void			*data,	/* data address			*/
-	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
-{
-	size_t			bend;
-
-	bend = boff + bsize;
-	while (boff < bend) {
-		struct page	*page;
-		int		page_index, page_offset, csize;
-
-		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
-		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
-		page = bp->b_pages[page_index];
-		csize = min_t(size_t, PAGE_SIZE - page_offset,
-				      BBTOB(bp->b_io_length) - boff);
-
-		ASSERT((csize + page_offset) <= PAGE_SIZE);
-
-		switch (mode) {
-		case XBRW_ZERO:
-			memset(page_address(page) + page_offset, 0, csize);
-			break;
-		case XBRW_READ:
-			memcpy(data, page_address(page) + page_offset, csize);
-			break;
-		case XBRW_WRITE:
-			memcpy(page_address(page) + page_offset, data, csize);
-		}
+__xfs_buf_mark_corrupt(
+	struct xfs_buf	*bp,
+	xfs_failaddr_t	fa)
+{
+	ASSERT(bp->b_flags & XBF_DONE);

-		boff += csize;
-		data += csize;
-	}
+	xfs_buf_corruption_error(bp, fa);
+	xfs_buf_stale(bp);
 }

 /*
@@ -1501,176 +1502,367 @@ xfs_buf_iomove(
  * returned. These buffers will have an elevated hold count, so wait on those
  * while freeing all the buffers only held by the LRU.
  */
+static enum lru_status
+xfs_buftarg_drain_rele(
+	struct list_head	*item,
+	struct list_lru_one	*lru,
+	void			*arg)
+
+{
+	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
+	struct list_head	*dispose = arg;
+
+	if (!spin_trylock(&bp->b_lock))
+		return LRU_SKIP;
+	if (bp->b_hold > 1) {
+		/* need to wait, so skip it this pass */
+		spin_unlock(&bp->b_lock);
+		trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
+		return LRU_SKIP;
+	}
+
+	/*
+	 * clear the LRU reference count so the buffer doesn't get
+	 * ignored in xfs_buf_rele().
+	 */
+	atomic_set(&bp->b_lru_ref, 0);
+	bp->b_state |= XFS_BSTATE_DISPOSE;
+	list_lru_isolate_move(lru, item, dispose);
+	spin_unlock(&bp->b_lock);
+	return LRU_REMOVED;
+}
+
+/*
+ * Wait for outstanding I/O on the buftarg to complete.
+ */
 void
-xfs_wait_buftarg(
+xfs_buftarg_wait(
 	struct xfs_buftarg	*btp)
 {
-	struct xfs_buf		*bp;
-
-restart:
-	spin_lock(&btp->bt_lru_lock);
-	while (!list_empty(&btp->bt_lru)) {
-		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-		if (atomic_read(&bp->b_hold) > 1) {
-			trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
-			list_move_tail(&bp->b_lru, &btp->bt_lru);
-			spin_unlock(&btp->bt_lru_lock);
-			delay(100);
-			goto restart;
-		}
-		/*
-		 * clear the LRU reference count so the buffer doesn't get
-		 * ignored in xfs_buf_rele().
-		 */
-		atomic_set(&bp->b_lru_ref, 0);
-		spin_unlock(&btp->bt_lru_lock);
-		xfs_buf_rele(bp);
-		spin_lock(&btp->bt_lru_lock);
-	}
-	spin_unlock(&btp->bt_lru_lock);
+	/*
+	 * First wait for all in-flight readahead buffers to be released. This is
+	 * critical as new buffers do not make the LRU until they are released.
+	 *
+	 * Next, flush the buffer workqueue to ensure all completion processing
+	 * has finished. Just waiting on buffer locks is not sufficient for
+	 * async IO as the reference count held over IO is not released until
+	 * after the buffer lock is dropped. Hence we need to ensure here that
+	 * all reference counts have been dropped before we start walking the
+	 * LRU list.
+	 */
+	while (percpu_counter_sum(&btp->bt_readahead_count))
+		delay(100);
+	flush_workqueue(btp->bt_mount->m_buf_workqueue);
 }

-int
-xfs_buftarg_shrink(
-	struct shrinker		*shrink,
-	struct shrink_control	*sc)
+void
+xfs_buftarg_drain(
+	struct xfs_buftarg	*btp)
 {
-	struct xfs_buftarg	*btp = container_of(shrink,
-					struct xfs_buftarg, bt_shrinker);
-	struct xfs_buf		*bp;
-	int nr_to_scan = sc->nr_to_scan;
 	LIST_HEAD(dispose);
+	int			loop = 0;
+	bool			write_fail = false;

-	if (!nr_to_scan)
-		return btp->bt_lru_nr;
-
-	spin_lock(&btp->bt_lru_lock);
-	while (!list_empty(&btp->bt_lru)) {
-		if (nr_to_scan-- <= 0)
-			break;
+	xfs_buftarg_wait(btp);

-		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+	/* loop until there is nothing left on the lru list. */
+	while (list_lru_count(&btp->bt_lru)) {
+		list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
+			      &dispose, LONG_MAX);

-		/*
-		 * Decrement the b_lru_ref count unless the value is already
-		 * zero. If the value is already zero, we need to reclaim the
-		 * buffer, otherwise it gets another trip through the LRU.
-		 */
-		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
-			list_move_tail(&bp->b_lru, &btp->bt_lru);
-			continue;
+		while (!list_empty(&dispose)) {
+			struct xfs_buf *bp;
+			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+			list_del_init(&bp->b_lru);
+			if (bp->b_flags & XBF_WRITE_FAIL) {
+				write_fail = true;
+				xfs_buf_alert_ratelimited(bp,
+					"XFS: Corruption Alert",
+"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
+					(long long)xfs_buf_daddr(bp));
+			}
+			xfs_buf_rele(bp);
 		}
+		if (loop++ != 0)
+			delay(100);
+	}

-		/*
-		 * remove the buffer from the LRU now to avoid needing another
-		 * lock round trip inside xfs_buf_rele().
-		 */
-		list_move(&bp->b_lru, &dispose);
-		btp->bt_lru_nr--;
-		bp->b_lru_flags |= _XBF_LRU_DISPOSE;
+	/*
+	 * If one or more failed buffers were freed, that means dirty metadata
+	 * was thrown away. This should only ever happen after I/O completion
+	 * handling has elevated I/O error(s) to permanent failures and shuts
+	 * down the journal.
+	 */
+	if (write_fail) {
+		ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
+		xfs_alert(btp->bt_mount,
+	      "Please run xfs_repair to determine the extent of the problem.");
+	}
+}
+
+static enum lru_status
+xfs_buftarg_isolate(
+	struct list_head	*item,
+	struct list_lru_one	*lru,
+	void			*arg)
+{
+	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
+	struct list_head	*dispose = arg;
+
+	/*
+	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
+	 * If we fail to get the lock, just skip it.
+	 */
+	if (!spin_trylock(&bp->b_lock))
+		return LRU_SKIP;
+	/*
+	 * Decrement the b_lru_ref count unless the value is already
+	 * zero. If the value is already zero, we need to reclaim the
+	 * buffer, otherwise it gets another trip through the LRU.
+	 */
+	if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+		spin_unlock(&bp->b_lock);
+		return LRU_ROTATE;
 	}
-	spin_unlock(&btp->bt_lru_lock);
+
+	bp->b_state |= XFS_BSTATE_DISPOSE;
+	list_lru_isolate_move(lru, item, dispose);
+	spin_unlock(&bp->b_lock);
+	return LRU_REMOVED;
+}
+
+static unsigned long
+xfs_buftarg_shrink_scan(
+	struct shrinker		*shrink,
+	struct shrink_control	*sc)
+{
+	struct xfs_buftarg	*btp = shrink->private_data;
+	LIST_HEAD(dispose);
+	unsigned long		freed;
+
+	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
+				     xfs_buftarg_isolate, &dispose);

 	while (!list_empty(&dispose)) {
+		struct xfs_buf *bp;
 		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
 		list_del_init(&bp->b_lru);
 		xfs_buf_rele(bp);
 	}

-	return btp->bt_lru_nr;
+	return freed;
+}
+
+static unsigned long
+xfs_buftarg_shrink_count(
+	struct shrinker		*shrink,
+	struct shrink_control	*sc)
+{
+	struct xfs_buftarg	*btp = shrink->private_data;
+	return list_lru_shrink_count(&btp->bt_lru, sc);
+}
+
+void
+xfs_destroy_buftarg(
+	struct xfs_buftarg	*btp)
+{
+	shrinker_free(btp->bt_shrinker);
+	ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+	percpu_counter_destroy(&btp->bt_readahead_count);
+	list_lru_destroy(&btp->bt_lru);
 }

 void
 xfs_free_buftarg(
-	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
-	unregister_shrinker(&btp->bt_shrinker);
+	xfs_destroy_buftarg(btp);
+	fs_put_dax(btp->bt_daxdev, btp->bt_mount);
+	/* the main block device is closed by kill_block_super */
+	if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
+		bdev_fput(btp->bt_file);
+	kfree(btp);
+}
+
+/*
+ * Configure this buffer target for hardware-assisted atomic writes if the
+ * atomic write geometry advertised by the underlying block device is congruent
+ * with the filesystem geometry.
+ */
+static inline void
+xfs_configure_buftarg_atomic_writes(
+	struct xfs_buftarg	*btp)
+{
+	struct xfs_mount	*mp = btp->bt_mount;
+	unsigned int		min_bytes, max_bytes;

-	if (mp->m_flags & XFS_MOUNT_BARRIER)
-		xfs_blkdev_issue_flush(btp);
+	min_bytes = bdev_atomic_write_unit_min_bytes(btp->bt_bdev);
+	max_bytes = bdev_atomic_write_unit_max_bytes(btp->bt_bdev);

-	kmem_free(btp);
+	/*
+	 * Ignore atomic write geometry that is nonsense or doesn't even cover
+	 * a single fsblock.
+	 */
+	if (min_bytes > max_bytes ||
+	    min_bytes > mp->m_sb.sb_blocksize ||
+	    max_bytes < mp->m_sb.sb_blocksize) {
+		min_bytes = 0;
+		max_bytes = 0;
+	}
+
+	btp->bt_awu_min = min_bytes;
+	btp->bt_awu_max = max_bytes;
 }

-STATIC int
-xfs_setsize_buftarg_flags(
-	xfs_buftarg_t		*btp,
-	unsigned int		blocksize,
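xfs_buftarg_shrink_count() and xfs_buftarg_shrink_scan() above plug into the allocate-then-register shrinker API that xfs_init_buftarg() uses further down. Reduced to a hypothetical cache for illustration (every example_* name and field is an assumption), the bare pattern looks like this:

	static unsigned long
	example_count(struct shrinker *shrink, struct shrink_control *sc)
	{
		struct example_cache *cache = shrink->private_data;

		return READ_ONCE(cache->nr_items);	/* assumed field */
	}

	static unsigned long
	example_scan(struct shrinker *shrink, struct shrink_control *sc)
	{
		/* Free up to sc->nr_to_scan objects; report how many went. */
		return example_reclaim(shrink->private_data, sc->nr_to_scan);
	}

	static int example_register(struct example_cache *cache)
	{
		cache->shrinker = shrinker_alloc(0, "example-cache");
		if (!cache->shrinker)
			return -ENOMEM;
		cache->shrinker->count_objects = example_count;
		cache->shrinker->scan_objects = example_scan;
		cache->shrinker->private_data = cache;
		shrinker_register(cache->shrinker);	/* now live */
		return 0;
	}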
+/* Configure a buffer target that abstracts a block device. */
+int
+xfs_configure_buftarg(
+	struct xfs_buftarg	*btp,
 	unsigned int		sectorsize,
-	int			verbose)
+	xfs_rfsblock_t		nr_blocks)
 {
-	btp->bt_bsize = blocksize;
-	btp->bt_sshift = ffs(sectorsize) - 1;
-	btp->bt_smask = sectorsize - 1;
+	struct xfs_mount	*mp = btp->bt_mount;

-	if (set_blocksize(btp->bt_bdev, sectorsize)) {
-		char name[BDEVNAME_SIZE];
+	if (btp->bt_bdev) {
+		int		error;

-		bdevname(btp->bt_bdev, name);
+		error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
+		if (error) {
+			xfs_warn(mp,
+				"Cannot use blocksize %u on device %pg, err %d",
+				sectorsize, btp->bt_bdev, error);
+			return -EINVAL;
+		}

-		xfs_warn(btp->bt_mount,
-			"Cannot set_blocksize to %u on device %s\n",
-			sectorsize, name);
-		return EINVAL;
+		if (bdev_can_atomic_write(btp->bt_bdev))
+			xfs_configure_buftarg_atomic_writes(btp);
 	}

+	btp->bt_meta_sectorsize = sectorsize;
+	btp->bt_meta_sectormask = sectorsize - 1;
+	/* m_blkbb_log is not set up yet */
+	btp->bt_nr_sectors = nr_blocks << (mp->m_sb.sb_blocklog - BBSHIFT);
 	return 0;
 }

-/*
- * When allocating the initial buffer target we have not yet
- * read in the superblock, so don't know what sized sectors
- * are being used is at this early stage.  Play safe.
- */
-STATIC int
-xfs_setsize_buftarg_early(
-	xfs_buftarg_t		*btp,
-	struct block_device	*bdev)
-{
-	return xfs_setsize_buftarg_flags(btp,
-			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
-}
-
 int
-xfs_setsize_buftarg(
-	xfs_buftarg_t		*btp,
-	unsigned int		blocksize,
-	unsigned int		sectorsize)
+xfs_init_buftarg(
+	struct xfs_buftarg		*btp,
+	size_t				logical_sectorsize,
+	const char			*descr)
 {
-	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
+	/* The maximum size of the buftarg is only known once the sb is read. */
+	btp->bt_nr_sectors = XFS_BUF_DADDR_MAX;
+
+	/* Set up device logical sector size mask */
+	btp->bt_logical_sectorsize = logical_sectorsize;
+	btp->bt_logical_sectormask = logical_sectorsize - 1;
+
+	/*
+	 * Buffer IO error rate limiting. Limit it to no more than 10 messages
+	 * per 30 seconds so as to not spam logs too much on repeated errors.
+	 */
+	ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
+			     DEFAULT_RATELIMIT_BURST);
+
+	if (list_lru_init(&btp->bt_lru))
+		return -ENOMEM;
+	if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
+		goto out_destroy_lru;
+
+	btp->bt_shrinker =
+		shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
+	if (!btp->bt_shrinker)
+		goto out_destroy_io_count;
+	btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
+	btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
+	btp->bt_shrinker->private_data = btp;
+	shrinker_register(btp->bt_shrinker);
+	return 0;
+
+out_destroy_io_count:
+	percpu_counter_destroy(&btp->bt_readahead_count);
+out_destroy_lru:
+	list_lru_destroy(&btp->bt_lru);
+	return -ENOMEM;
 }

-xfs_buftarg_t *
+struct xfs_buftarg *
 xfs_alloc_buftarg(
 	struct xfs_mount	*mp,
-	struct block_device	*bdev,
-	int			external,
-	const char		*fsname)
+	struct file		*bdev_file)
 {
-	xfs_buftarg_t		*btp;
+	struct xfs_buftarg	*btp;
+	const struct dax_holder_operations *ops = NULL;
+	int			error;
+

-	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
+	ops = &xfs_dax_holder_operations;
+#endif
+	btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);

 	btp->bt_mount = mp;
-	btp->bt_dev = bdev->bd_dev;
-	btp->bt_bdev = bdev;
-	btp->bt_bdi = blk_get_backing_dev_info(bdev);
-	if (!btp->bt_bdi)
-		goto error;
-
-	INIT_LIST_HEAD(&btp->bt_lru);
-	spin_lock_init(&btp->bt_lru_lock);
-	if (xfs_setsize_buftarg_early(btp, bdev))
-		goto error;
-	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
-	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
-	register_shrinker(&btp->bt_shrinker);
+	btp->bt_file = bdev_file;
+	btp->bt_bdev = file_bdev(bdev_file);
+	btp->bt_dev = btp->bt_bdev->bd_dev;
+	btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
+					    mp, ops);
+
+	/*
+	 * Flush and invalidate all devices' pagecaches before reading any
+	 * metadata because XFS doesn't use the bdev pagecache.
+	 */
+	error = sync_blockdev(btp->bt_bdev);
+	if (error)
+		goto error_free;
+
+	/*
+	 * When allocating the buftargs we have not yet read the super block and
+	 * thus don't know the file system sector size yet.
+	 */
+	btp->bt_meta_sectorsize = bdev_logical_block_size(btp->bt_bdev);
+	btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;
+
+	error = xfs_init_buftarg(btp, btp->bt_meta_sectorsize,
+				 mp->m_super->s_id);
+	if (error)
+		goto error_free;

 	return btp;

-error:
-	kmem_free(btp);
-	return NULL;
+error_free:
+	kfree(btp);
+	return ERR_PTR(error);
+}
+
+static inline void
+xfs_buf_list_del(
+	struct xfs_buf		*bp)
+{
+	list_del_init(&bp->b_list);
+	wake_up_var(&bp->b_list);
+}
+
+/*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+	struct list_head	*list)
+{
+	struct xfs_buf		*bp;
+
+	while (!list_empty(list)) {
+		bp = list_first_entry(list, struct xfs_buf, b_list);
+
+		xfs_buf_lock(bp);
+		bp->b_flags &= ~_XBF_DELWRI_Q;
+		xfs_buf_list_del(bp);
+		xfs_buf_relse(bp);
+	}
 }

 /*
@@ -1714,7 +1906,7 @@ xfs_buf_delwri_queue(
 	 */
 	bp->b_flags |= _XBF_DELWRI_Q;
 	if (list_empty(&bp->b_list)) {
-		atomic_inc(&bp->b_hold);
+		xfs_buf_hold(bp);
 		list_add_tail(&bp->b_list, list);
 	}

@@ -1722,15 +1914,43 @@ xfs_buf_delwri_queue(
 }
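Taken together, the delwri interfaces form a small batching protocol: queue locked buffers onto a caller-owned list (xfs_buf_delwri_queue() takes its own hold), unlock them, then push the whole list in one sorted pass. A caller-side sketch (hypothetical function, not from this patch):

	/* Hypothetical caller: batch two dirty buffers, write back in one pass. */
	static int example_flush_two(struct xfs_buf *bp1, struct xfs_buf *bp2)
	{
		LIST_HEAD(buffer_list);

		xfs_buf_lock(bp1);
		xfs_buf_delwri_queue(bp1, &buffer_list);	/* takes a hold */
		xfs_buf_unlock(bp1);

		xfs_buf_lock(bp2);
		xfs_buf_delwri_queue(bp2, &buffer_list);
		xfs_buf_unlock(bp2);

		/* Sorts by disk address, writes synchronously, drops holds. */
		return xfs_buf_delwri_submit(&buffer_list);
	}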
 /*
+ * Queue a buffer to this delwri list as part of a data integrity operation.
+ * If the buffer is on any other delwri list, we'll wait for that to clear
+ * so that the caller can submit the buffer for IO and wait for the result.
+ * Callers must ensure the buffer is not already on the list.
+ */
+void
+xfs_buf_delwri_queue_here(
+	struct xfs_buf		*bp,
+	struct list_head	*buffer_list)
+{
+	/*
+	 * We need this buffer to end up on the /caller's/ delwri list, not any
+	 * old list. This can happen if the buffer is marked stale (which
+	 * clears DELWRI_Q) after the AIL queues the buffer to its list but
+	 * before the AIL has a chance to submit the list.
+	 */
+	while (!list_empty(&bp->b_list)) {
+		xfs_buf_unlock(bp);
+		wait_var_event(&bp->b_list, list_empty(&bp->b_list));
+		xfs_buf_lock(bp);
+	}
+
+	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+
+	xfs_buf_delwri_queue(bp, buffer_list);
+}
+
+/*
  * Compare function is more complex than it needs to be because
  * the return value is only 32 bits and we are doing comparisons
  * on 64 bit values
  */
 static int
 xfs_buf_cmp(
-	void		*priv,
-	struct list_head *a,
-	struct list_head *b)
+	void			*priv,
+	const struct list_head	*a,
+	const struct list_head	*b)
 {
 	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
 	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
@@ -1744,60 +1964,26 @@ xfs_buf_cmp(
 	return 0;
 }

-static int
-__xfs_buf_delwri_submit(
-	struct list_head	*buffer_list,
-	struct list_head	*io_list,
-	bool			wait)
+static bool
+xfs_buf_delwri_submit_prep(
+	struct xfs_buf		*bp)
 {
-	struct blk_plug		plug;
-	struct xfs_buf		*bp, *n;
-	int			pinned = 0;
-
-	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
-		if (!wait) {
-			if (xfs_buf_ispinned(bp)) {
-				pinned++;
-				continue;
-			}
-			if (!xfs_buf_trylock(bp))
-				continue;
-		} else {
-			xfs_buf_lock(bp);
-		}
-
-		/*
-		 * Someone else might have written the buffer synchronously or
-		 * marked it stale in the meantime.  In that case only the
-		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
-		 * reference and remove it from the list here.
-		 */
-		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
-			list_del_init(&bp->b_list);
-			xfs_buf_relse(bp);
-			continue;
-		}
-
-		list_move_tail(&bp->b_list, io_list);
-		trace_xfs_buf_delwri_split(bp, _RET_IP_);
-	}
-
-	list_sort(NULL, io_list, xfs_buf_cmp);
-
-	blk_start_plug(&plug);
-	list_for_each_entry_safe(bp, n, io_list, b_list) {
-		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
-		bp->b_flags |= XBF_WRITE;
-
-		if (!wait) {
-			bp->b_flags |= XBF_ASYNC;
-			list_del_init(&bp->b_list);
-		}
-		xfs_bdstrat_cb(bp);
+	/*
+	 * Someone else might have written the buffer synchronously or marked it
+	 * stale in the meantime. In that case only the _XBF_DELWRI_Q flag got
+	 * cleared, and we have to drop the reference and remove it from the
+	 * list here.
+	 */
+	if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+		xfs_buf_list_del(bp);
+		xfs_buf_relse(bp);
		return false;
 	}
-	blk_finish_plug(&plug);

-	return pinned;
+	trace_xfs_buf_delwri_split(bp, _RET_IP_);
+	bp->b_flags &= ~_XBF_DELWRI_Q;
+	bp->b_flags |= XBF_WRITE;
+	return true;
 }

 /*
@@ -1808,13 +1994,42 @@ __xfs_buf_delwri_submit(
  * is only safely useable for callers that can track I/O completion by higher
  * level means, e.g. AIL pushing as the @buffer_list is consumed in this
  * function.
+ *
+ * Note: this function will skip buffers it would block on, and in doing so
+ * leaves them on @buffer_list so they can be retried on a later pass. As such,
+ * it is up to the caller to ensure that the buffer list is fully submitted or
+ * cancelled appropriately when they are finished with the list. Failure to
+ * cancel or resubmit the list until it is empty will result in leaked buffers
+ * at unmount time.
  */
 int
 xfs_buf_delwri_submit_nowait(
 	struct list_head	*buffer_list)
 {
-	LIST_HEAD		(io_list);
-	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+	struct xfs_buf		*bp, *n;
+	int			pinned = 0;
+	struct blk_plug		plug;
+
+	list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+	blk_start_plug(&plug);
+	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+		if (!xfs_buf_trylock(bp))
+			continue;
+		if (xfs_buf_ispinned(bp)) {
+			xfs_buf_unlock(bp);
+			pinned++;
+			continue;
+		}
+		if (!xfs_buf_delwri_submit_prep(bp))
+			continue;
+		bp->b_flags |= XBF_ASYNC;
+		xfs_buf_list_del(bp);
+		xfs_buf_submit(bp);
+	}
+	blk_finish_plug(&plug);
+
+	return pinned;
 }

 /*
@@ -1829,17 +2044,34 @@ int
 xfs_buf_delwri_submit(
 	struct list_head	*buffer_list)
 {
-	LIST_HEAD		(io_list);
+	LIST_HEAD		(wait_list);
 	int			error = 0, error2;
-	struct xfs_buf		*bp;
+	struct xfs_buf		*bp, *n;
+	struct blk_plug		plug;

-	__xfs_buf_delwri_submit(buffer_list, &io_list, true);
+	list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+	blk_start_plug(&plug);
+	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+		xfs_buf_lock(bp);
+		if (!xfs_buf_delwri_submit_prep(bp))
+			continue;
+		bp->b_flags &= ~XBF_ASYNC;
+		list_move_tail(&bp->b_list, &wait_list);
+		xfs_buf_submit(bp);
+	}
+	blk_finish_plug(&plug);

 	/* Wait for IO to complete. */
-	while (!list_empty(&io_list)) {
-		bp = list_first_entry(&io_list, struct xfs_buf, b_list);
+	while (!list_empty(&wait_list)) {
+		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
+
+		xfs_buf_list_del(bp);

-		list_del_init(&bp->b_list);
+		/*
+		 * Wait on the locked buffer, check for errors and unlock and
+		 * release the delwri queue reference.
+		 */
 		error2 = xfs_buf_iowait(bp);
 		xfs_buf_relse(bp);
 		if (!error)
@@ -1849,30 +2081,52 @@ xfs_buf_delwri_submit(
 	return error;
 }
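The note above is worth making concrete: xfs_buf_delwri_submit_nowait() leaves skipped (locked or pinned) buffers on @buffer_list, so the caller must either keep resubmitting or cancel the list. A hedged sketch of the retry shape — the log force to unpin stragglers mirrors what AIL pushing does, but this loop is an illustration, not code from this patch:

	/* Hypothetical push loop: resubmit until the list drains. */
	static void example_push_all(struct xfs_mount *mp, struct list_head *list)
	{
		do {
			if (xfs_buf_delwri_submit_nowait(list))
				xfs_log_force(mp, XFS_LOG_SYNC);
		} while (!list_empty(list));
	}

On an error path the same contract is satisfied by emptying the list with xfs_buf_delwri_cancel() instead.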

-int __init
-xfs_buf_init(void)
+void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
 {
-	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
-						KM_ZONE_HWALIGN, NULL);
-	if (!xfs_buf_zone)
-		goto out;
+	/*
+	 * Set the lru reference count to 0 based on the error injection tag.
+	 * This allows userspace to disrupt buffer caching for debug/testing
+	 * purposes.
+	 */
+	if (XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
+		lru_ref = 0;

-	xfslogd_workqueue = alloc_workqueue("xfslogd",
-				WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
-	if (!xfslogd_workqueue)
-		goto out_free_buf_zone;
+	atomic_set(&bp->b_lru_ref, lru_ref);
+}

-	return 0;
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic(
+	struct xfs_buf		*bp,
+	__be32			dmagic)
+{
+	struct xfs_mount	*mp = bp->b_mount;
+	int			idx;

- out_free_buf_zone:
-	kmem_zone_destroy(xfs_buf_zone);
- out:
-	return -ENOMEM;
+	idx = xfs_has_crc(mp);
+	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
+		return false;
+	return dmagic == bp->b_ops->magic[idx];
 }
-
-void
-xfs_buf_terminate(void)
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic16(
+	struct xfs_buf		*bp,
+	__be16			dmagic)
 {
-	destroy_workqueue(xfslogd_workqueue);
-	kmem_zone_destroy(xfs_buf_zone);
+	struct xfs_mount	*mp = bp->b_mount;
+	int			idx;
+
+	idx = xfs_has_crc(mp);
+	if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
+		return false;
+	return dmagic == bp->b_ops->magic16[idx];
 }
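xfs_verify_magic() and xfs_verify_magic16() index b_ops->magic[]/magic16[] with xfs_has_crc(), so each ops table carries both the pre-CRC and the CRC-era magic in disk byte order. A sketch of how a verifier table supplies them and how a read verifier consumes them (the constants, header layout, and names here are assumptions for illustration):

	/* Hypothetical ops table: slot 0 = non-CRC magic, slot 1 = CRC magic. */
	static void
	xfs_bar_verify_read(
		struct xfs_buf	*bp)
	{
		struct xfs_bar_hdr *hdr = bp->b_addr;	/* assumed layout */

		if (!xfs_verify_magic(bp, hdr->bar_magic))
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
	}

	const struct xfs_buf_ops xfs_bar_buf_ops = {
		.name		= "xfs_bar",
		.magic		= { cpu_to_be32(XFS_BAR_MAGIC),
				    cpu_to_be32(XFS_BAR_CRC_MAGIC) },
		.verify_read	= xfs_bar_verify_read,
		.verify_write	= xfs_bar_verify_read,	/* sketch reuses it */
	};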
