Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--  fs/xfs/xfs_buf.c  2480
1 files changed, 1208 insertions, 1272 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index eedc5e0156ff..47edf3041631 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -4,47 +4,31 @@
* All Rights Reserved.
*/
#include "xfs.h"
-#include <linux/stddef.h>
-#include <linux/errno.h>
-#include <linux/gfp.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/bio.h>
-#include <linux/sysctl.h>
-#include <linux/proc_fs.h>
-#include <linux/workqueue.h>
-#include <linux/percpu.h>
-#include <linux/blkdev.h>
-#include <linux/hash.h>
-#include <linux/kthread.h>
-#include <linux/migrate.h>
#include <linux/backing-dev.h>
-#include <linux/freezer.h>
+#include <linux/dax.h>
+#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
-#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_log_recover.h"
+#include "xfs_log_priv.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
+#include "xfs_ag.h"
+#include "xfs_buf_mem.h"
+#include "xfs_notify_failure.h"
-static kmem_zone_t *xfs_buf_zone;
-
-#define xb_to_gfp(flags) \
- ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)
+struct kmem_cache *xfs_buf_cache;
/*
* Locking orders
*
- * xfs_buf_ioacct_inc:
- * xfs_buf_ioacct_dec:
- * b_sema (caller holds)
- * b_lock
- *
* xfs_buf_stale:
* b_sema (caller holds)
* b_lock
@@ -52,10 +36,9 @@ static kmem_zone_t *xfs_buf_zone;
*
* xfs_buf_rele:
* b_lock
- * pag_buf_lock
- * lru_lock
+ * lru_lock
*
- * xfs_buftarg_wait_rele
+ * xfs_buftarg_drain_rele
* lru_lock
* b_lock (trylock due to inversion)
*
@@ -64,79 +47,12 @@ static kmem_zone_t *xfs_buf_zone;
* b_lock (trylock due to inversion)
*/
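A minimal userspace sketch of the inversion noted above, with pthread mutexes standing in for the kernel spinlocks: a walker that already holds lru_lock may only trylock b_lock and must back off on failure, because the normal order is b_lock before lru_lock. The names are illustrative analogs, not kernel API:

#include <pthread.h>

static pthread_mutex_t b_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&lru_lock);
	/* Reverse of the normal order, so never block here. */
	if (pthread_mutex_trylock(&b_lock) == 0) {
		/* ... isolate the buffer ... */
		pthread_mutex_unlock(&b_lock);
	}
	/* On trylock failure: skip this buffer; a later scan retries. */
	pthread_mutex_unlock(&lru_lock);
	return 0;
}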
-static inline int
-xfs_buf_is_vmapped(
- struct xfs_buf *bp)
-{
- /*
- * Return true if the buffer is vmapped.
- *
- * b_addr is null if the buffer is not mapped, but the code is clever
- * enough to know it doesn't have to map a single page, so the check has
- * to be both for b_addr and bp->b_page_count > 1.
- */
- return bp->b_addr && bp->b_page_count > 1;
-}
-
-static inline int
-xfs_buf_vmap_len(
- struct xfs_buf *bp)
-{
- return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
-}
-
-/*
- * Bump the I/O in flight count on the buftarg if we haven't yet done so for
- * this buffer. The count is incremented once per buffer (per hold cycle)
- * because the corresponding decrement is deferred to buffer release. Buffers
- * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
- * tracking adds unnecessary overhead. This is used for synchronization purposes
- * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
- * in-flight buffers.
- *
- * Buffers that are never released (e.g., superblock, iclog buffers) must set
- * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
- * never reaches zero and unmount hangs indefinitely.
- */
-static inline void
-xfs_buf_ioacct_inc(
- struct xfs_buf *bp)
-{
- if (bp->b_flags & XBF_NO_IOACCT)
- return;
-
- ASSERT(bp->b_flags & XBF_ASYNC);
- spin_lock(&bp->b_lock);
- if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
- bp->b_state |= XFS_BSTATE_IN_FLIGHT;
- percpu_counter_inc(&bp->b_target->bt_io_count);
- }
- spin_unlock(&bp->b_lock);
-}
-
-/*
- * Clear the in-flight state on a buffer about to be released to the LRU or
- * freed and unaccount from the buftarg.
- */
-static inline void
-__xfs_buf_ioacct_dec(
- struct xfs_buf *bp)
-{
- lockdep_assert_held(&bp->b_lock);
-
- if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
- bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
- percpu_counter_dec(&bp->b_target->bt_io_count);
- }
-}
+static void xfs_buf_submit(struct xfs_buf *bp);
+static int xfs_buf_iowait(struct xfs_buf *bp);
-static inline void
-xfs_buf_ioacct_dec(
- struct xfs_buf *bp)
+static inline bool xfs_buf_is_uncached(struct xfs_buf *bp)
{
- spin_lock(&bp->b_lock);
- __xfs_buf_ioacct_dec(bp);
- spin_unlock(&bp->b_lock);
+ return bp->b_rhash_key == XFS_BUF_DADDR_NULL;
}
/*
@@ -162,331 +78,244 @@ xfs_buf_stale(
*/
bp->b_flags &= ~_XBF_DELWRI_Q;
- /*
- * Once the buffer is marked stale and unlocked, a subsequent lookup
- * could reset b_flags. There is no guarantee that the buffer is
- * unaccounted (released to LRU) before that occurs. Drop in-flight
- * status now to preserve accounting consistency.
- */
spin_lock(&bp->b_lock);
- __xfs_buf_ioacct_dec(bp);
-
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
- (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
- atomic_dec(&bp->b_hold);
+ (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru)))
+ bp->b_hold--;
- ASSERT(atomic_read(&bp->b_hold) >= 1);
+ ASSERT(bp->b_hold >= 1);
spin_unlock(&bp->b_lock);
}
+static void
+xfs_buf_free_callback(
+ struct callback_head *cb)
+{
+ struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
+
+ if (bp->b_maps != &bp->__b_map)
+ kfree(bp->b_maps);
+ kmem_cache_free(xfs_buf_cache, bp);
+}
+
+static void
+xfs_buf_free(
+ struct xfs_buf *bp)
+{
+ unsigned int size = BBTOB(bp->b_length);
+
+ might_sleep();
+ trace_xfs_buf_free(bp, _RET_IP_);
+
+ ASSERT(list_empty(&bp->b_lru));
+
+ if (!xfs_buftarg_is_mem(bp->b_target) && size >= PAGE_SIZE)
+ mm_account_reclaimed_pages(howmany(size, PAGE_SHIFT));
+
+ if (is_vmalloc_addr(bp->b_addr))
+ vfree(bp->b_addr);
+ else if (bp->b_flags & _XBF_KMEM)
+ kfree(bp->b_addr);
+ else
+ folio_put(virt_to_folio(bp->b_addr));
+
+ call_rcu(&bp->b_rcu, xfs_buf_free_callback);
+}
+
static int
-xfs_buf_get_maps(
+xfs_buf_alloc_kmem(
struct xfs_buf *bp,
- int map_count)
+ size_t size,
+ gfp_t gfp_mask)
{
- ASSERT(bp->b_maps == NULL);
- bp->b_map_count = map_count;
+ ASSERT(is_power_of_2(size));
+ ASSERT(size < PAGE_SIZE);
- if (map_count == 1) {
- bp->b_maps = &bp->__b_map;
- return 0;
- }
+ bp->b_addr = kmalloc(size, gfp_mask | __GFP_NOFAIL);
+ if (!bp->b_addr)
+ return -ENOMEM;
- bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
- KM_NOFS);
- if (!bp->b_maps)
+ /*
+ * Slab guarantees that we get back naturally aligned allocations for
+ * power of two sizes. Keep this check as the canary in the coal mine
+ * if anything changes in slab.
+ */
+ if (WARN_ON_ONCE(!IS_ALIGNED((unsigned long)bp->b_addr, size))) {
+ kfree(bp->b_addr);
+ bp->b_addr = NULL;
return -ENOMEM;
+ }
+ bp->b_flags |= _XBF_KMEM;
+ trace_xfs_buf_backing_kmem(bp, _RET_IP_);
return 0;
}
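The alignment invariant that xfs_buf_alloc_kmem() warns about can be shown in a standalone sketch; aligned_alloc() stands in for the natural alignment slab guarantees for power-of-two sizes (illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define IS_ALIGNED(x, a)	(((x) & ((uintptr_t)(a) - 1)) == 0)

int main(void)
{
	size_t size = 512;	/* power-of-two, sub-page buffer size */
	void *p = aligned_alloc(size, size);	/* slab gives this for free */

	assert(p && IS_ALIGNED((uintptr_t)p, size));
	free(p);
	return 0;
}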
/*
- * Frees b_pages if it was allocated.
+ * Allocate backing memory for a buffer.
+ *
+ * For tmpfs-backed buffers used by in-memory btrees this directly maps the
+ * tmpfs page cache folios.
+ *
+ * For real file system buffers there are three different kinds of backing memory:
+ *
+ * The first type backs the buffer by a kmalloc allocation. This is done for
+ * less than PAGE_SIZE allocations to avoid wasting memory.
+ *
+ * The second type is a single folio buffer - this may be a high order folio or
+ * just a single page sized folio, but either way they get treated the same way
+ * by the rest of the code - the buffer memory spans a single contiguous memory
+ * region that we don't have to map and unmap to access the data directly.
+ *
+ * The third type of buffer is the vmalloc()d buffer. This provides the buffer
+ * with the required contiguous memory region but backed by discontiguous
+ * physical pages.
*/
-static void
-xfs_buf_free_maps(
- struct xfs_buf *bp)
+static int
+xfs_buf_alloc_backing_mem(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
{
- if (bp->b_maps != &bp->__b_map) {
- kmem_free(bp->b_maps);
- bp->b_maps = NULL;
+ size_t size = BBTOB(bp->b_length);
+ gfp_t gfp_mask = GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOWARN;
+ struct folio *folio;
+
+ if (xfs_buftarg_is_mem(bp->b_target))
+ return xmbuf_map_backing_mem(bp);
+
+ /* Assure zeroed buffer for non-read cases. */
+ if (!(flags & XBF_READ))
+ gfp_mask |= __GFP_ZERO;
+
+ if (flags & XBF_READ_AHEAD)
+ gfp_mask |= __GFP_NORETRY;
+
+ /*
+ * For buffers smaller than PAGE_SIZE use a kmalloc allocation if that
+ * is properly aligned. The slab allocator now guarantees an aligned
+ * allocation for all power of two sizes, which matches most of the
+ * smaller than PAGE_SIZE buffers used by XFS.
+ */
+ if (size < PAGE_SIZE && is_power_of_2(size))
+ return xfs_buf_alloc_kmem(bp, size, gfp_mask);
+
+ /*
+ * Don't bother with the retry loop for single PAGE allocations: vmalloc
+ * won't do any better.
+ */
+ if (size <= PAGE_SIZE)
+ gfp_mask |= __GFP_NOFAIL;
+
+ /*
+ * Optimistically attempt a single high order folio allocation for
+ * larger than PAGE_SIZE buffers.
+ *
+ * Allocating a high order folio makes the assumption that buffers are a
+ * power-of-2 size, matching the power-of-2 folio sizes available.
+ *
+ * The exceptions here are user xattr data buffers, which can be arbitrarily
+ * sized up to 64kB plus structure metadata; skip straight to the vmalloc
+ * path for them instead of wasting memory here.
+ */
+ if (size > PAGE_SIZE) {
+ if (!is_power_of_2(size))
+ goto fallback;
+ gfp_mask &= ~__GFP_DIRECT_RECLAIM;
+ gfp_mask |= __GFP_NORETRY;
}
+ folio = folio_alloc(gfp_mask, get_order(size));
+ if (!folio) {
+ if (size <= PAGE_SIZE)
+ return -ENOMEM;
+ trace_xfs_buf_backing_fallback(bp, _RET_IP_);
+ goto fallback;
+ }
+ bp->b_addr = folio_address(folio);
+ trace_xfs_buf_backing_folio(bp, _RET_IP_);
+ return 0;
+
+fallback:
+ for (;;) {
+ bp->b_addr = __vmalloc(size, gfp_mask);
+ if (bp->b_addr)
+ break;
+ if (flags & XBF_READ_AHEAD)
+ return -ENOMEM;
+ XFS_STATS_INC(bp->b_mount, xb_page_retries);
+ memalloc_retry_wait(gfp_mask);
+ }
+
+ trace_xfs_buf_backing_vmalloc(bp, _RET_IP_);
+ return 0;
}
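To make the tiering above concrete, here is a small userspace sketch of the size-based decision. PAGE_SIZE and the 68608-byte xattr-style size are illustrative assumptions:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096u

static bool is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

static const char *backing(unsigned int size)
{
	if (size < PAGE_SIZE && is_power_of_2(size))
		return "kmalloc";
	if (size <= PAGE_SIZE)
		return "single page folio (__GFP_NOFAIL)";
	if (!is_power_of_2(size))
		return "vmalloc";
	return "high order folio, vmalloc fallback";
}

int main(void)
{
	unsigned int sizes[] = { 512, 4096, 16384, 68608 };

	for (int i = 0; i < 4; i++)
		printf("%6u -> %s\n", sizes[i], backing(sizes[i]));
	return 0;
}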
-struct xfs_buf *
-_xfs_buf_alloc(
+static int
+xfs_buf_alloc(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
struct xfs_buf *bp;
int error;
int i;
- bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
- if (unlikely(!bp))
- return NULL;
+ *bpp = NULL;
+ bp = kmem_cache_zalloc(xfs_buf_cache,
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
/*
* We don't want certain flags to appear in b_flags unless they are
* specifically set by later operations on the buffer.
*/
- flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+ flags &= ~(XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+
+ /*
+ * A new buffer is held and locked by the owner. This ensures that the
+ * buffer is owned by the caller and racing RCU lookups right after
+ * inserting into the hash table are safe (and will have to wait for
+ * the unlock to do anything non-trivial).
+ */
+ bp->b_hold = 1;
+ sema_init(&bp->b_sema, 0); /* held, no waiters */
- atomic_set(&bp->b_hold, 1);
+ spin_lock_init(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 1);
init_completion(&bp->b_iowait);
INIT_LIST_HEAD(&bp->b_lru);
INIT_LIST_HEAD(&bp->b_list);
INIT_LIST_HEAD(&bp->b_li_list);
- sema_init(&bp->b_sema, 0); /* held, no waiters */
- spin_lock_init(&bp->b_lock);
bp->b_target = target;
+ bp->b_mount = target->bt_mount;
bp->b_flags = flags;
-
- /*
- * Set length and io_length to the same value initially.
- * I/O routines should use io_length, which will be the same in
- * most cases but may be reset (e.g. XFS recovery).
- */
- error = xfs_buf_get_maps(bp, nmaps);
- if (error) {
- kmem_zone_free(xfs_buf_zone, bp);
- return NULL;
- }
-
- bp->b_bn = map[0].bm_bn;
+ bp->b_rhash_key = map[0].bm_bn;
bp->b_length = 0;
+ bp->b_map_count = nmaps;
+ if (nmaps == 1)
+ bp->b_maps = &bp->__b_map;
+ else
+ bp->b_maps = kcalloc(nmaps, sizeof(struct xfs_buf_map),
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
for (i = 0; i < nmaps; i++) {
bp->b_maps[i].bm_bn = map[i].bm_bn;
bp->b_maps[i].bm_len = map[i].bm_len;
bp->b_length += map[i].bm_len;
}
- bp->b_io_length = bp->b_length;
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
- XFS_STATS_INC(target->bt_mount, xb_create);
+ XFS_STATS_INC(bp->b_mount, xb_create);
trace_xfs_buf_init(bp, _RET_IP_);
- return bp;
-}
-
-/*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
- */
-STATIC int
-_xfs_buf_get_pages(
- xfs_buf_t *bp,
- int page_count)
-{
- /* Make sure that we have a page list */
- if (bp->b_pages == NULL) {
- bp->b_page_count = page_count;
- if (page_count <= XB_PAGES) {
- bp->b_pages = bp->b_page_array;
- } else {
- bp->b_pages = kmem_alloc(sizeof(struct page *) *
- page_count, KM_NOFS);
- if (bp->b_pages == NULL)
- return -ENOMEM;
- }
- memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
- }
- return 0;
-}
-
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
- xfs_buf_t *bp)
-{
- if (bp->b_pages != bp->b_page_array) {
- kmem_free(bp->b_pages);
- bp->b_pages = NULL;
- }
-}
-
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
-void
-xfs_buf_free(
- xfs_buf_t *bp)
-{
- trace_xfs_buf_free(bp, _RET_IP_);
-
- ASSERT(list_empty(&bp->b_lru));
-
- if (bp->b_flags & _XBF_PAGES) {
- uint i;
-
- if (xfs_buf_is_vmapped(bp))
- vm_unmap_ram(bp->b_addr - bp->b_offset,
- bp->b_page_count);
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page = bp->b_pages[i];
-
- __free_page(page);
- }
- } else if (bp->b_flags & _XBF_KMEM)
- kmem_free(bp->b_addr);
- _xfs_buf_free_pages(bp);
- xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
-}
-
-/*
- * Allocates all the pages for the buffer in question and builds its page list.
- */
-STATIC int
-xfs_buf_allocate_memory(
- xfs_buf_t *bp,
- uint flags)
-{
- size_t size;
- size_t nbytes, offset;
- gfp_t gfp_mask = xb_to_gfp(flags);
- unsigned short page_count, i;
- xfs_off_t start, end;
- int error;
-
- /*
- * for buffers that are contained within a single page, just allocate
- * the memory from the heap - there's no need for the complexity of
- * page arrays to keep allocation down to order 0.
- */
- size = BBTOB(bp->b_length);
- if (size < PAGE_SIZE) {
- bp->b_addr = kmem_alloc(size, KM_NOFS);
- if (!bp->b_addr) {
- /* low memory - use alloc_page loop instead */
- goto use_alloc_page;
- }
-
- if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
- ((unsigned long)bp->b_addr & PAGE_MASK)) {
- /* b_addr spans two pages - use alloc_page instead */
- kmem_free(bp->b_addr);
- bp->b_addr = NULL;
- goto use_alloc_page;
- }
- bp->b_offset = offset_in_page(bp->b_addr);
- bp->b_pages = bp->b_page_array;
- bp->b_pages[0] = virt_to_page(bp->b_addr);
- bp->b_page_count = 1;
- bp->b_flags |= _XBF_KMEM;
- return 0;
- }
-
-use_alloc_page:
- start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
- end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
- >> PAGE_SHIFT;
- page_count = end - start;
- error = _xfs_buf_get_pages(bp, page_count);
- if (unlikely(error))
+ error = xfs_buf_alloc_backing_mem(bp, flags);
+ if (error) {
+ xfs_buf_free(bp);
return error;
-
- offset = bp->b_offset;
- bp->b_flags |= _XBF_PAGES;
-
- for (i = 0; i < bp->b_page_count; i++) {
- struct page *page;
- uint retries = 0;
-retry:
- page = alloc_page(gfp_mask);
- if (unlikely(page == NULL)) {
- if (flags & XBF_READ_AHEAD) {
- bp->b_page_count = i;
- error = -ENOMEM;
- goto out_free_pages;
- }
-
- /*
- * This could deadlock.
- *
- * But until all the XFS lowlevel code is revamped to
- * handle buffer allocation failures we can't do much.
- */
- if (!(++retries % 100))
- xfs_err(NULL,
- "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
- current->comm, current->pid,
- __func__, gfp_mask);
-
- XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- goto retry;
- }
-
- XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
-
- nbytes = min_t(size_t, size, PAGE_SIZE - offset);
- size -= nbytes;
- bp->b_pages[i] = page;
- offset = 0;
- }
- return 0;
-
-out_free_pages:
- for (i = 0; i < bp->b_page_count; i++)
- __free_page(bp->b_pages[i]);
- bp->b_flags &= ~_XBF_PAGES;
- return error;
-}
-
-/*
- * Map buffer into kernel address-space if necessary.
- */
-STATIC int
-_xfs_buf_map_pages(
- xfs_buf_t *bp,
- uint flags)
-{
- ASSERT(bp->b_flags & _XBF_PAGES);
- if (bp->b_page_count == 1) {
- /* A single page buffer is always mappable */
- bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
- } else if (flags & XBF_UNMAPPED) {
- bp->b_addr = NULL;
- } else {
- int retried = 0;
- unsigned nofs_flag;
-
- /*
- * vm_map_ram() will allocate auxiliary structures (e.g.
- * pagetables) with GFP_KERNEL, yet we are likely to be under
- * GFP_NOFS context here. Hence we need to tell memory reclaim
- * that we are in such a context via PF_MEMALLOC_NOFS to prevent
- * memory reclaim re-entering the filesystem here and
- * potentially deadlocking.
- */
- nofs_flag = memalloc_nofs_save();
- do {
- bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
- -1, PAGE_KERNEL);
- if (bp->b_addr)
- break;
- vm_unmap_aliases();
- } while (retried++ <= 1);
- memalloc_nofs_restore(nofs_flag);
-
- if (!bp->b_addr)
- return -ENOMEM;
- bp->b_addr += bp->b_offset;
}
+ *bpp = bp;
return 0;
}
@@ -507,7 +336,7 @@ _xfs_buf_obj_cmp(
*/
BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);
- if (bp->b_bn != map->bm_bn)
+ if (bp->b_rhash_key != map->bm_bn)
return 1;
if (unlikely(bp->b_length != map->bm_len)) {
@@ -518,8 +347,12 @@ _xfs_buf_obj_cmp(
* it stale has not yet committed. i.e. we are
* reallocating a busy extent. Skip this buffer and
* continue searching for an exact match.
+ *
+ * Note: If we're scanning for incore buffers to stale, don't
+ * complain if we find non-stale buffers.
*/
- ASSERT(bp->b_flags & XBF_STALE);
+ if (!(map->bm_flags & XBM_LIVESCAN))
+ ASSERT(bp->b_flags & XBF_STALE);
return 1;
}
return 0;
@@ -529,121 +362,62 @@ static const struct rhashtable_params xfs_buf_hash_params = {
.min_size = 32, /* empty AGs have minimal footprint */
.nelem_hint = 16,
.key_len = sizeof(xfs_daddr_t),
- .key_offset = offsetof(struct xfs_buf, b_bn),
+ .key_offset = offsetof(struct xfs_buf, b_rhash_key),
.head_offset = offsetof(struct xfs_buf, b_rhash_head),
.automatic_shrinking = true,
.obj_cmpfn = _xfs_buf_obj_cmp,
};
int
-xfs_buf_hash_init(
- struct xfs_perag *pag)
+xfs_buf_cache_init(
+ struct xfs_buf_cache *bch)
{
- spin_lock_init(&pag->pag_buf_lock);
- return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
+ return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params);
}
void
-xfs_buf_hash_destroy(
- struct xfs_perag *pag)
+xfs_buf_cache_destroy(
+ struct xfs_buf_cache *bch)
{
- rhashtable_destroy(&pag->pag_buf_hash);
+ rhashtable_destroy(&bch->bc_hash);
}
-/*
- * Look up a buffer in the buffer cache and return it referenced and locked
- * in @found_bp.
- *
- * If @new_bp is supplied and we have a lookup miss, insert @new_bp into the
- * cache.
- *
- * If XBF_TRYLOCK is set in @flags, only try to lock the buffer and return
- * -EAGAIN if we fail to lock it.
- *
- * Return values are:
- * -EFSCORRUPTED if have been supplied with an invalid address
- * -EAGAIN on trylock failure
- * -ENOENT if we fail to find a match and @new_bp was NULL
- * 0, with @found_bp:
- * - @new_bp if we inserted it into the cache
- * - the buffer we found and locked.
- */
static int
-xfs_buf_find(
+xfs_buf_map_verify(
struct xfs_buftarg *btp,
- struct xfs_buf_map *map,
- int nmaps,
- xfs_buf_flags_t flags,
- struct xfs_buf *new_bp,
- struct xfs_buf **found_bp)
+ struct xfs_buf_map *map)
{
- struct xfs_perag *pag;
- xfs_buf_t *bp;
- struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
- xfs_daddr_t eofs;
- int i;
-
- *found_bp = NULL;
-
- for (i = 0; i < nmaps; i++)
- cmap.bm_len += map[i].bm_len;
-
/* Check for IOs smaller than the sector size / not sector aligned */
- ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
- ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
+ ASSERT(!(BBTOB(map->bm_len) < btp->bt_meta_sectorsize));
+ ASSERT(!(BBTOB(map->bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));
/*
* Corrupted block numbers can get through to here, unfortunately, so we
* have to check that the buffer falls within the filesystem bounds.
*/
- eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
- if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
+ if (map->bm_bn < 0 || map->bm_bn >= btp->bt_nr_sectors) {
xfs_alert(btp->bt_mount,
"%s: daddr 0x%llx out of range, EOFS 0x%llx",
- __func__, cmap.bm_bn, eofs);
+ __func__, map->bm_bn, btp->bt_nr_sectors);
WARN_ON(1);
return -EFSCORRUPTED;
}
-
- pag = xfs_perag_get(btp->bt_mount,
- xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));
-
- spin_lock(&pag->pag_buf_lock);
- bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
- xfs_buf_hash_params);
- if (bp) {
- atomic_inc(&bp->b_hold);
- goto found;
- }
-
- /* No match found */
- if (!new_bp) {
- XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- return -ENOENT;
- }
-
- /* the buffer keeps the perag reference until it is freed */
- new_bp->b_pag = pag;
- rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
- xfs_buf_hash_params);
- spin_unlock(&pag->pag_buf_lock);
- *found_bp = new_bp;
return 0;
+}
-found:
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
-
- if (!xfs_buf_trylock(bp)) {
- if (flags & XBF_TRYLOCK) {
- xfs_buf_rele(bp);
- XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
+static int
+xfs_buf_find_lock(
+ struct xfs_buf *bp,
+ xfs_buf_flags_t flags)
+{
+ if (flags & XBF_TRYLOCK) {
+ if (!xfs_buf_trylock(bp)) {
+ XFS_STATS_INC(bp->b_mount, xb_busy_locked);
return -EAGAIN;
}
+ } else {
xfs_buf_lock(bp);
- XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
+ XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
}
/*
@@ -652,33 +426,137 @@ found:
* intact here.
*/
if (bp->b_flags & XBF_STALE) {
+ if (flags & XBF_LIVESCAN) {
+ xfs_buf_unlock(bp);
+ return -ENOENT;
+ }
ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
- ASSERT(bp->b_iodone == NULL);
- bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
+ bp->b_flags &= _XBF_KMEM;
bp->b_ops = NULL;
}
+ return 0;
+}
+
+static bool
+xfs_buf_try_hold(
+ struct xfs_buf *bp)
+{
+ spin_lock(&bp->b_lock);
+ if (bp->b_hold == 0) {
+ spin_unlock(&bp->b_lock);
+ return false;
+ }
+ bp->b_hold++;
+ spin_unlock(&bp->b_lock);
+ return true;
+}
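The point of the b_hold > 0 check is that RCU lookups can race with the final release: a hold must never resurrect a buffer whose count has already reached zero. A userspace analog, with a pthread mutex in place of the b_lock spinlock (names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t	lock;
	int		hold;
};

static bool try_hold(struct obj *o)
{
	bool ok;

	pthread_mutex_lock(&o->lock);
	ok = o->hold > 0;	/* zero: a concurrent free won the race */
	if (ok)
		o->hold++;
	pthread_mutex_unlock(&o->lock);
	return ok;
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };

	printf("hold %s\n", try_hold(&o) ? "taken" : "refused");
	return 0;
}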
+
+static inline int
+xfs_buf_lookup(
+ struct xfs_buf_cache *bch,
+ struct xfs_buf_map *map,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
+{
+ struct xfs_buf *bp;
+ int error;
+
+ rcu_read_lock();
+ bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params);
+ if (!bp || !xfs_buf_try_hold(bp)) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+ rcu_read_unlock();
+
+ error = xfs_buf_find_lock(bp, flags);
+ if (error) {
+ xfs_buf_rele(bp);
+ return error;
+ }
trace_xfs_buf_find(bp, flags, _RET_IP_);
- XFS_STATS_INC(btp->bt_mount, xb_get_locked);
- *found_bp = bp;
+ *bpp = bp;
return 0;
}
-struct xfs_buf *
-xfs_buf_incore(
- struct xfs_buftarg *target,
- xfs_daddr_t blkno,
- size_t numblks,
- xfs_buf_flags_t flags)
+/*
+ * Insert the new_bp into the hash table. This consumes the perag reference
+ * taken for the lookup regardless of the result of the insert.
+ */
+static int
+xfs_buf_find_insert(
+ struct xfs_buftarg *btp,
+ struct xfs_buf_cache *bch,
+ struct xfs_perag *pag,
+ struct xfs_buf_map *cmap,
+ struct xfs_buf_map *map,
+ int nmaps,
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
+ struct xfs_buf *new_bp;
struct xfs_buf *bp;
int error;
- DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
- error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
+ error = xfs_buf_alloc(btp, map, nmaps, flags, &new_bp);
if (error)
+ goto out_drop_pag;
+
+ /* The new buffer keeps the perag reference until it is freed. */
+ new_bp->b_pag = pag;
+
+ rcu_read_lock();
+ bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash,
+ &new_bp->b_rhash_head, xfs_buf_hash_params);
+ if (IS_ERR(bp)) {
+ rcu_read_unlock();
+ error = PTR_ERR(bp);
+ goto out_free_buf;
+ }
+ if (bp && xfs_buf_try_hold(bp)) {
+ /* found an existing buffer */
+ rcu_read_unlock();
+ error = xfs_buf_find_lock(bp, flags);
+ if (error)
+ xfs_buf_rele(bp);
+ else
+ *bpp = bp;
+ goto out_free_buf;
+ }
+ rcu_read_unlock();
+
+ *bpp = new_bp;
+ return 0;
+
+out_free_buf:
+ xfs_buf_free(new_bp);
+out_drop_pag:
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
+}
+
+static inline struct xfs_perag *
+xfs_buftarg_get_pag(
+ struct xfs_buftarg *btp,
+ const struct xfs_buf_map *map)
+{
+ struct xfs_mount *mp = btp->bt_mount;
+
+ if (xfs_buftarg_is_mem(btp))
return NULL;
- return bp;
+ return xfs_perag_get(mp, xfs_daddr_to_agno(mp, map->bm_bn));
+}
+
+static inline struct xfs_buf_cache *
+xfs_buftarg_buf_cache(
+ struct xfs_buftarg *btp,
+ struct xfs_perag *pag)
+{
+ if (pag)
+ return &pag->pag_bcache;
+ return btp->bt_cache;
}
/*
@@ -686,67 +564,53 @@ xfs_buf_incore(
* cache hits, as metadata intensive workloads will see 3 orders of magnitude
* more hits than misses.
*/
-struct xfs_buf *
+int
xfs_buf_get_map(
- struct xfs_buftarg *target,
+ struct xfs_buftarg *btp,
struct xfs_buf_map *map,
int nmaps,
- xfs_buf_flags_t flags)
+ xfs_buf_flags_t flags,
+ struct xfs_buf **bpp)
{
- struct xfs_buf *bp;
- struct xfs_buf *new_bp;
- int error = 0;
+ struct xfs_buf_cache *bch;
+ struct xfs_perag *pag;
+ struct xfs_buf *bp = NULL;
+ struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn };
+ int error;
+ int i;
- error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
+ if (flags & XBF_LIVESCAN)
+ cmap.bm_flags |= XBM_LIVESCAN;
+ for (i = 0; i < nmaps; i++)
+ cmap.bm_len += map[i].bm_len;
- switch (error) {
- case 0:
- /* cache hit */
- goto found;
- case -EAGAIN:
- /* cache hit, trylock failure, caller handles failure */
- ASSERT(flags & XBF_TRYLOCK);
- return NULL;
- case -ENOENT:
- /* cache miss, go for insert */
- break;
- case -EFSCORRUPTED:
- default:
- /*
- * None of the higher layers understand failure types
- * yet, so return NULL to signal a fatal lookup error.
- */
- return NULL;
- }
+ error = xfs_buf_map_verify(btp, &cmap);
+ if (error)
+ return error;
- new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
- if (unlikely(!new_bp))
- return NULL;
+ pag = xfs_buftarg_get_pag(btp, &cmap);
+ bch = xfs_buftarg_buf_cache(btp, pag);
- error = xfs_buf_allocate_memory(new_bp, flags);
- if (error) {
- xfs_buf_free(new_bp);
- return NULL;
- }
+ error = xfs_buf_lookup(bch, &cmap, flags, &bp);
+ if (error && error != -ENOENT)
+ goto out_put_perag;
- error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
- if (error) {
- xfs_buf_free(new_bp);
- return NULL;
- }
+ /* cache hits always outnumber misses by at least 10:1 */
+ if (unlikely(!bp)) {
+ XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
- if (bp != new_bp)
- xfs_buf_free(new_bp);
-
-found:
- if (!bp->b_addr) {
- error = _xfs_buf_map_pages(bp, flags);
- if (unlikely(error)) {
-			xfs_warn(target->bt_mount,
-				"%s: failed to map pages\n", __func__);
- xfs_buf_relse(bp);
- return NULL;
- }
+ if (flags & XBF_INCORE)
+ goto out_put_perag;
+
+ /* xfs_buf_find_insert() consumes the perag reference. */
+ error = xfs_buf_find_insert(btp, bch, pag, &cmap, map, nmaps,
+ flags, &bp);
+ if (error)
+ return error;
+ } else {
+ XFS_STATS_INC(btp->bt_mount, xb_get_locked);
+ if (pag)
+ xfs_perag_put(pag);
}
/*
@@ -756,33 +620,48 @@ found:
if (!(flags & XBF_READ))
xfs_buf_ioerror(bp, 0);
- XFS_STATS_INC(target->bt_mount, xb_get);
+ XFS_STATS_INC(btp->bt_mount, xb_get);
trace_xfs_buf_get(bp, flags, _RET_IP_);
- return bp;
+ *bpp = bp;
+ return 0;
+
+out_put_perag:
+ if (pag)
+ xfs_perag_put(pag);
+ return error;
}
-STATIC int
+int
_xfs_buf_read(
- xfs_buf_t *bp,
- xfs_buf_flags_t flags)
+ struct xfs_buf *bp)
{
- ASSERT(!(flags & XBF_WRITE));
ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
- bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
- bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
-
- return xfs_buf_submit(bp);
+ bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
+ bp->b_flags |= XBF_READ;
+ xfs_buf_submit(bp);
+ return xfs_buf_iowait(bp);
}
/*
- * If the caller passed in an ops structure and the buffer doesn't have ops
- * assigned, set the ops and use them to verify the contents. If the contents
- * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no
- * recorded errors and is already in XBF_DONE state.
+ * Reverify a buffer found in cache without an attached ->b_ops.
+ *
+ * If the caller passed an ops structure and the buffer doesn't have ops
+ * assigned, set the ops and use it to verify the contents. If verification
+ * fails, clear XBF_DONE. We assume the buffer has no recorded errors and is
+ * already in XBF_DONE state on entry.
+ *
+ * Under normal operations, every in-core buffer is verified on read I/O
+ * completion. There are two scenarios that can lead to in-core buffers without
+ * an assigned ->b_ops. The first is during log recovery of buffers on a V4
+ * filesystem, though these buffers are purged at the end of recovery. The
+ * other is online repair, which intentionally reads with a NULL buffer ops to
+ * run several verifiers across an in-core buffer in order to establish buffer
+ * type. If repair can't establish that, the buffer will be left in memory
+ * with NULL buffer ops.
*/
int
-xfs_buf_ensure_ops(
+xfs_buf_reverify(
struct xfs_buf *bp,
const struct xfs_buf_ops *ops)
{
@@ -799,46 +678,77 @@ xfs_buf_ensure_ops(
return bp->b_error;
}
-xfs_buf_t *
+int
xfs_buf_read_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
- const struct xfs_buf_ops *ops)
+ struct xfs_buf **bpp,
+ const struct xfs_buf_ops *ops,
+ xfs_failaddr_t fa)
{
struct xfs_buf *bp;
+ int error;
+
+ ASSERT(!(flags & (XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD)));
flags |= XBF_READ;
+ *bpp = NULL;
- bp = xfs_buf_get_map(target, map, nmaps, flags);
- if (!bp)
- return NULL;
+ error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
+ if (error)
+ return error;
trace_xfs_buf_read(bp, flags, _RET_IP_);
if (!(bp->b_flags & XBF_DONE)) {
+ /* Initiate the buffer read and wait. */
XFS_STATS_INC(target->bt_mount, xb_get_read);
bp->b_ops = ops;
- _xfs_buf_read(bp, flags);
- return bp;
- }
+ error = _xfs_buf_read(bp);
+ } else {
+ /* Buffer already read; all we need to do is check it. */
+ error = xfs_buf_reverify(bp, ops);
- xfs_buf_ensure_ops(bp, ops);
+ /* We do not want read in the flags */
+ bp->b_flags &= ~XBF_READ;
+ ASSERT(bp->b_ops != NULL || ops == NULL);
+ }
- if (flags & XBF_ASYNC) {
+ /*
+ * If we've had a read error, then the contents of the buffer are
+ * invalid and should not be used. To ensure that a followup read tries
+ * to pull the buffer from disk again, we clear the XBF_DONE flag and
+ * mark the buffer stale. This ensures that anyone who has a current
+ * reference to the buffer will interpret its contents correctly and
+ * future cache lookups will also treat it as an empty, uninitialised
+ * buffer.
+ */
+ if (error) {
/*
- * Read ahead call which is already satisfied,
- * drop the buffer
+ * Check against log shutdown for error reporting because
+ * metadata writeback may require a read first and we need to
+ * report errors in metadata writeback until the log is shut
+ * down. High level transaction read functions already check
+ * against mount shutdown, anyway, so we only need to be
+ * concerned about low level IO interactions here.
*/
+ if (!xlog_is_shutdown(target->bt_mount->m_log))
+ xfs_buf_ioerror_alert(bp, fa);
+
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
xfs_buf_relse(bp);
- return NULL;
+
+ /* bad CRC means corrupted metadata */
+ if (error == -EFSBADCRC)
+ error = -EFSCORRUPTED;
+ return error;
}
- /* We do not want read in the flags */
- bp->b_flags &= ~XBF_READ;
- ASSERT(bp->b_ops != NULL || ops == NULL);
- return bp;
+ *bpp = bp;
+ return 0;
}
/*
@@ -852,44 +762,66 @@ xfs_buf_readahead_map(
int nmaps,
const struct xfs_buf_ops *ops)
{
- if (bdi_read_congested(target->bt_bdev->bd_bdi))
+ const xfs_buf_flags_t flags = XBF_READ | XBF_ASYNC | XBF_READ_AHEAD;
+ struct xfs_buf *bp;
+
+ /*
+ * Currently we don't have a good means or justification for performing
+ * xmbuf_map_page asynchronously, so we don't do readahead.
+ */
+ if (xfs_buftarg_is_mem(target))
+ return;
+
+ if (xfs_buf_get_map(target, map, nmaps, flags | XBF_TRYLOCK, &bp))
return;
+ trace_xfs_buf_readahead(bp, 0, _RET_IP_);
- xfs_buf_read_map(target, map, nmaps,
- XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
+ if (bp->b_flags & XBF_DONE) {
+ xfs_buf_reverify(bp, ops);
+ xfs_buf_relse(bp);
+ return;
+ }
+ XFS_STATS_INC(target->bt_mount, xb_get_read);
+ bp->b_ops = ops;
+ bp->b_flags &= ~(XBF_WRITE | XBF_DONE);
+ bp->b_flags |= flags;
+ percpu_counter_inc(&target->bt_readahead_count);
+ xfs_buf_submit(bp);
}
/*
* Read an uncached buffer from disk. Allocates and returns a locked
- * buffer containing the disk contents or nothing.
+ * buffer containing the disk contents or nothing. Uncached buffers always have
+ * a cache index of XFS_BUF_DADDR_NULL so we can easily determine if the buffer
+ * is cached or uncached during fault diagnosis.
*/
int
xfs_buf_read_uncached(
struct xfs_buftarg *target,
xfs_daddr_t daddr,
size_t numblks,
- int flags,
struct xfs_buf **bpp,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
+ int error;
*bpp = NULL;
- bp = xfs_buf_get_uncached(target, numblks, flags);
- if (!bp)
- return -ENOMEM;
+ error = xfs_buf_get_uncached(target, numblks, &bp);
+ if (error)
+ return error;
/* set up the buffer for a read IO */
ASSERT(bp->b_map_count == 1);
- bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
+ bp->b_rhash_key = XFS_BUF_DADDR_NULL;
bp->b_maps[0].bm_bn = daddr;
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
xfs_buf_submit(bp);
- if (bp->b_error) {
- int error = bp->b_error;
+ error = xfs_buf_iowait(bp);
+ if (error) {
xfs_buf_relse(bp);
return error;
}
@@ -898,130 +830,19 @@ xfs_buf_read_uncached(
return 0;
}
-/*
- * Return a buffer allocated as an empty buffer and associated with external
- * memory via xfs_buf_associate_memory() back to its empty state.
- */
-void
-xfs_buf_set_empty(
- struct xfs_buf *bp,
- size_t numblks)
-{
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
-
- bp->b_pages = NULL;
- bp->b_page_count = 0;
- bp->b_addr = NULL;
- bp->b_length = numblks;
- bp->b_io_length = numblks;
-
- ASSERT(bp->b_map_count == 1);
- bp->b_bn = XFS_BUF_DADDR_NULL;
- bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
- bp->b_maps[0].bm_len = bp->b_length;
-}
-
-static inline struct page *
-mem_to_page(
- void *addr)
-{
- if ((!is_vmalloc_addr(addr))) {
- return virt_to_page(addr);
- } else {
- return vmalloc_to_page(addr);
- }
-}
-
int
-xfs_buf_associate_memory(
- xfs_buf_t *bp,
- void *mem,
- size_t len)
-{
- int rval;
- int i = 0;
- unsigned long pageaddr;
- unsigned long offset;
- size_t buflen;
- int page_count;
-
- pageaddr = (unsigned long)mem & PAGE_MASK;
- offset = (unsigned long)mem - pageaddr;
- buflen = PAGE_ALIGN(len + offset);
- page_count = buflen >> PAGE_SHIFT;
-
- /* Free any previous set of page pointers */
- if (bp->b_pages)
- _xfs_buf_free_pages(bp);
-
- bp->b_pages = NULL;
- bp->b_addr = mem;
-
- rval = _xfs_buf_get_pages(bp, page_count);
- if (rval)
- return rval;
-
- bp->b_offset = offset;
-
- for (i = 0; i < bp->b_page_count; i++) {
- bp->b_pages[i] = mem_to_page((void *)pageaddr);
- pageaddr += PAGE_SIZE;
- }
-
- bp->b_io_length = BTOBB(len);
- bp->b_length = BTOBB(buflen);
-
- return 0;
-}
-
-xfs_buf_t *
xfs_buf_get_uncached(
struct xfs_buftarg *target,
size_t numblks,
- int flags)
+ struct xfs_buf **bpp)
{
- unsigned long page_count;
- int error, i;
- struct xfs_buf *bp;
+ int error;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
- /* flags might contain irrelevant bits, pass only what we care about */
- bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
- if (unlikely(bp == NULL))
- goto fail;
-
- page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
- error = _xfs_buf_get_pages(bp, page_count);
- if (error)
- goto fail_free_buf;
-
- for (i = 0; i < page_count; i++) {
- bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
- if (!bp->b_pages[i])
- goto fail_free_mem;
- }
- bp->b_flags |= _XBF_PAGES;
-
- error = _xfs_buf_map_pages(bp, 0);
- if (unlikely(error)) {
- xfs_warn(target->bt_mount,
- "%s: failed to map pages", __func__);
- goto fail_free_mem;
- }
-
- trace_xfs_buf_get_uncached(bp, _RET_IP_);
- return bp;
-
- fail_free_mem:
- while (--i >= 0)
- __free_page(bp->b_pages[i]);
- _xfs_buf_free_pages(bp);
- fail_free_buf:
- xfs_buf_free_maps(bp);
- kmem_zone_free(xfs_buf_zone, bp);
- fail:
- return NULL;
+ error = xfs_buf_alloc(target, &map, 1, 0, bpp);
+ if (!error)
+ trace_xfs_buf_get_uncached(*bpp, _RET_IP_);
+ return error;
}
/*
@@ -1031,75 +852,61 @@ xfs_buf_get_uncached(
*/
void
xfs_buf_hold(
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
trace_xfs_buf_hold(bp, _RET_IP_);
- atomic_inc(&bp->b_hold);
+
+ spin_lock(&bp->b_lock);
+ bp->b_hold++;
+ spin_unlock(&bp->b_lock);
}
-/*
- * Release a hold on the specified buffer. If the hold count is 1, the buffer is
- * placed on LRU or freed (depending on b_lru_ref).
- */
-void
-xfs_buf_rele(
- xfs_buf_t *bp)
+static void
+xfs_buf_rele_uncached(
+ struct xfs_buf *bp)
{
- struct xfs_perag *pag = bp->b_pag;
- bool release;
- bool freebuf = false;
-
- trace_xfs_buf_rele(bp, _RET_IP_);
+ ASSERT(list_empty(&bp->b_lru));
- if (!pag) {
- ASSERT(list_empty(&bp->b_lru));
- if (atomic_dec_and_test(&bp->b_hold)) {
- xfs_buf_ioacct_dec(bp);
- xfs_buf_free(bp);
- }
+ spin_lock(&bp->b_lock);
+ if (--bp->b_hold) {
+ spin_unlock(&bp->b_lock);
return;
}
+ spin_unlock(&bp->b_lock);
+ xfs_buf_free(bp);
+}
- ASSERT(atomic_read(&bp->b_hold) > 0);
+static void
+xfs_buf_rele_cached(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+ struct xfs_perag *pag = bp->b_pag;
+ struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag);
+ bool freebuf = false;
+
+ trace_xfs_buf_rele(bp, _RET_IP_);
- /*
- * We grab the b_lock here first to serialise racing xfs_buf_rele()
- * calls. The pag_buf_lock being taken on the last reference only
- * serialises against racing lookups in xfs_buf_find(). IOWs, the second
- * to last reference we drop here is not serialised against the last
- * reference until we take bp->b_lock. Hence if we don't grab b_lock
- * first, the last "release" reference can win the race to the lock and
- * free the buffer before the second-to-last reference is processed,
- * leading to a use-after-free scenario.
- */
spin_lock(&bp->b_lock);
- release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
- if (!release) {
- /*
- * Drop the in-flight state if the buffer is already on the LRU
- * and it holds the only reference. This is racy because we
- * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
- * ensures the decrement occurs only once per-buf.
- */
- if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
- __xfs_buf_ioacct_dec(bp);
+ ASSERT(bp->b_hold >= 1);
+ if (bp->b_hold > 1) {
+ bp->b_hold--;
goto out_unlock;
}
- /* the last reference has been dropped ... */
- __xfs_buf_ioacct_dec(bp);
- if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+ /* we are asked to drop the last reference */
+ if (atomic_read(&bp->b_lru_ref)) {
/*
- * If the buffer is added to the LRU take a new reference to the
+ * If the buffer is added to the LRU, keep the reference to the
* buffer for the LRU and clear the (now stale) dispose list
- * state flag
+ * state flag, else drop the reference.
*/
- if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+ if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru))
bp->b_state &= ~XFS_BSTATE_DISPOSE;
- atomic_inc(&bp->b_hold);
- }
- spin_unlock(&pag->pag_buf_lock);
+ else
+ bp->b_hold--;
} else {
+ bp->b_hold--;
/*
* most of the time buffers will already be removed from the
* LRU, so optimise that case by checking for the
@@ -1107,16 +914,16 @@ xfs_buf_rele(
* was on was the disposal list
*/
if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
- list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+ list_lru_del_obj(&btp->bt_lru, &bp->b_lru);
} else {
ASSERT(list_empty(&bp->b_lru));
}
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
- xfs_buf_hash_params);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
+ rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head,
+ xfs_buf_hash_params);
+ if (pag)
+ xfs_perag_put(pag);
freebuf = true;
}
@@ -1127,6 +934,19 @@ out_unlock:
xfs_buf_free(bp);
}
+/*
+ * Release a hold on the specified buffer.
+ */
+void
+xfs_buf_rele(
+ struct xfs_buf *bp)
+{
+ trace_xfs_buf_rele(bp, _RET_IP_);
+ if (xfs_buf_is_uncached(bp))
+ xfs_buf_rele_uncached(bp);
+ else
+ xfs_buf_rele_cached(bp);
+}
/*
* Lock a buffer object, if it is not already locked.
@@ -1169,7 +989,7 @@ xfs_buf_lock(
trace_xfs_buf_lock(bp, _RET_IP_);
if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
- xfs_log_force(bp->b_target->bt_mount, 0);
+ xfs_log_force(bp->b_mount, 0);
down(&bp->b_sema);
trace_xfs_buf_lock_done(bp, _RET_IP_);
@@ -1187,7 +1007,7 @@ xfs_buf_unlock(
STATIC void
xfs_buf_wait_unpin(
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
DECLARE_WAITQUEUE (wait, current);
@@ -1205,39 +1025,195 @@ xfs_buf_wait_unpin(
set_current_state(TASK_RUNNING);
}
+static void
+xfs_buf_ioerror_alert_ratelimited(
+ struct xfs_buf *bp)
+{
+ static unsigned long lasttime;
+ static struct xfs_buftarg *lasttarg;
+
+ if (bp->b_target != lasttarg ||
+ time_after(jiffies, (lasttime + 5*HZ))) {
+ lasttime = jiffies;
+ xfs_buf_ioerror_alert(bp, __this_address);
+ }
+ lasttarg = bp->b_target;
+}
+
/*
- * Buffer Utility Routines
+ * Account for this latest trip around the retry handler, and decide if
+ * we've failed enough times to constitute a permanent failure.
*/
+static bool
+xfs_buf_ioerror_permanent(
+ struct xfs_buf *bp,
+ struct xfs_error_cfg *cfg)
+{
+ struct xfs_mount *mp = bp->b_mount;
-void
-xfs_buf_ioend(
- struct xfs_buf *bp)
+ if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
+ ++bp->b_retries > cfg->max_retries)
+ return true;
+ if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
+ time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
+ return true;
+
+ /* At unmount we may treat errors differently */
+ if (xfs_is_unmounting(mp) && mp->m_fail_unmount)
+ return true;
+
+ return false;
+}
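A standalone sketch of that decision: a bounded retry budget plus an optional wall-clock window, mirroring the max_retries and retry_timeout fields of struct xfs_error_cfg. Plain integers stand in for jiffies and the values are made up:

#include <stdbool.h>
#include <stdio.h>

#define RETRY_FOREVER	(-1)

struct cfg {
	int	max_retries;
	long	retry_timeout;
};

static bool permanent(const struct cfg *c, int *retries, long first, long now)
{
	if (c->max_retries != RETRY_FOREVER && ++*retries > c->max_retries)
		return true;
	if (c->retry_timeout != RETRY_FOREVER && now > first + c->retry_timeout)
		return true;
	return false;
}

int main(void)
{
	struct cfg c = { .max_retries = 2, .retry_timeout = RETRY_FOREVER };
	int retries = 0;

	for (long trip = 1; trip <= 4; trip++)
		printf("trip %ld: %s\n", trip,
		       permanent(&c, &retries, 0, trip) ? "permanent" : "retry");
	return 0;
}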
+
+/*
+ * On a sync write or shutdown we just want to stale the buffer and let the
+ * caller handle the error in bp->b_error appropriately.
+ *
+ * If the write was asynchronous then no one will be looking for the error. If
+ * this is the first failure of this type, clear the error state and write the
+ * buffer out again. This means we always retry an async write failure at least
+ * once, but we also need to set the buffer up to behave correctly now for
+ * repeated failures.
+ *
+ * If we get repeated async write failures, then we take action according to the
+ * error configuration we have been set up to use.
+ *
+ * Returns true if this function took care of error handling and the caller must
+ * not touch the buffer again. Returns false if the caller should proceed with
+ * normal I/O completion handling.
+ */
+static bool
+xfs_buf_ioend_handle_error(
+ struct xfs_buf *bp)
{
- bool read = bp->b_flags & XBF_READ;
+ struct xfs_mount *mp = bp->b_mount;
+ struct xfs_error_cfg *cfg;
+ struct xfs_log_item *lip;
- trace_xfs_buf_iodone(bp, _RET_IP_);
+ /*
+ * If we've already shutdown the journal because of I/O errors, there's
+ * no point in giving this a retry.
+ */
+ if (xlog_is_shutdown(mp->m_log))
+ goto out_stale;
- bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
+ xfs_buf_ioerror_alert_ratelimited(bp);
/*
- * Pull in IO completion errors now. We are guaranteed to be running
- * single threaded, so we don't need the lock to read b_io_error.
+ * We're not going to bother about retrying this during recovery.
+ * One strike!
*/
- if (!bp->b_error && bp->b_io_error)
- xfs_buf_ioerror(bp, bp->b_io_error);
+ if (bp->b_flags & _XBF_LOGRECOVERY) {
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ return false;
+ }
- /* Only validate buffers that were read without errors */
- if (read && !bp->b_error && bp->b_ops) {
- ASSERT(!bp->b_iodone);
- bp->b_ops->verify_read(bp);
+ /*
+ * Synchronous writes will have callers process the error.
+ */
+ if (!(bp->b_flags & XBF_ASYNC))
+ goto out_stale;
+
+ trace_xfs_buf_iodone_async(bp, _RET_IP_);
+
+ cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
+ if (bp->b_last_error != bp->b_error ||
+ !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
+ bp->b_last_error = bp->b_error;
+ if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
+ !bp->b_first_retry_time)
+ bp->b_first_retry_time = jiffies;
+ goto resubmit;
}
- if (!bp->b_error)
- bp->b_flags |= XBF_DONE;
+ /*
+ * Permanent error - we need to trigger a shutdown if we haven't already
+ * to indicate that inconsistency will result from this action.
+ */
+ if (xfs_buf_ioerror_permanent(bp, cfg)) {
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ goto out_stale;
+ }
+
+ /* Still considered a transient error. Caller will schedule retries. */
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
+ set_bit(XFS_LI_FAILED, &lip->li_flags);
+ clear_bit(XFS_LI_FLUSHING, &lip->li_flags);
+ }
+
+ xfs_buf_ioerror(bp, 0);
+ xfs_buf_relse(bp);
+ return true;
- if (bp->b_iodone)
- (*(bp->b_iodone))(bp);
- else if (bp->b_flags & XBF_ASYNC)
+resubmit:
+ xfs_buf_ioerror(bp, 0);
+ bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
+ reinit_completion(&bp->b_iowait);
+ xfs_buf_submit(bp);
+ return true;
+out_stale:
+ xfs_buf_stale(bp);
+ bp->b_flags |= XBF_DONE;
+ bp->b_flags &= ~XBF_WRITE;
+ trace_xfs_buf_error_relse(bp, _RET_IP_);
+ return false;
+}
+
+/* returns false if the caller needs to resubmit the I/O, else true */
+static bool
+__xfs_buf_ioend(
+ struct xfs_buf *bp)
+{
+ trace_xfs_buf_iodone(bp, _RET_IP_);
+
+ if (bp->b_flags & XBF_READ) {
+ if (!bp->b_error && is_vmalloc_addr(bp->b_addr))
+ invalidate_kernel_vmap_range(bp->b_addr,
+ roundup(BBTOB(bp->b_length), PAGE_SIZE));
+ if (!bp->b_error && bp->b_ops)
+ bp->b_ops->verify_read(bp);
+ if (!bp->b_error)
+ bp->b_flags |= XBF_DONE;
+ if (bp->b_flags & XBF_READ_AHEAD)
+ percpu_counter_dec(&bp->b_target->bt_readahead_count);
+ } else {
+ if (!bp->b_error) {
+ bp->b_flags &= ~XBF_WRITE_FAIL;
+ bp->b_flags |= XBF_DONE;
+ }
+
+ if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
+ return false;
+
+ /* clear the retry state */
+ bp->b_last_error = 0;
+ bp->b_retries = 0;
+ bp->b_first_retry_time = 0;
+
+ /*
+ * Note that for things like remote attribute buffers, there may
+ * not be a buffer log item here, so processing the buffer log
+ * item must remain optional.
+ */
+ if (bp->b_log_item)
+ xfs_buf_item_done(bp);
+
+ if (bp->b_iodone)
+ bp->b_iodone(bp);
+ }
+
+ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
+ _XBF_LOGRECOVERY);
+ return true;
+}
+
+static void
+xfs_buf_ioend(
+ struct xfs_buf *bp)
+{
+ if (!__xfs_buf_ioend(bp))
+ return;
+ if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp);
else
complete(&bp->b_iowait);
@@ -1248,22 +1224,15 @@ xfs_buf_ioend_work(
struct work_struct *work)
{
struct xfs_buf *bp =
- container_of(work, xfs_buf_t, b_ioend_work);
+ container_of(work, struct xfs_buf, b_ioend_work);
- xfs_buf_ioend(bp);
-}
-
-static void
-xfs_buf_ioend_async(
- struct xfs_buf *bp)
-{
- INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
- queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
+ if (__xfs_buf_ioend(bp))
+ xfs_buf_relse(bp);
}
void
__xfs_buf_ioerror(
- xfs_buf_t *bp,
+ struct xfs_buf *bp,
int error,
xfs_failaddr_t failaddr)
{
@@ -1275,12 +1244,28 @@ __xfs_buf_ioerror(
void
xfs_buf_ioerror_alert(
struct xfs_buf *bp,
- const char *func)
+ xfs_failaddr_t func)
+{
+ xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
+ "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
+ func, (uint64_t)xfs_buf_daddr(bp),
+ bp->b_length, -bp->b_error);
+}
+
+/*
+ * To simulate an I/O failure, the buffer must be locked and held with at least
+ * three references. The LRU reference is dropped by the stale call. The buf
+ * item reference is dropped via ioend processing. The third reference is owned
+ * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
+ */
+void
+xfs_buf_ioend_fail(
+ struct xfs_buf *bp)
{
- xfs_alert(bp->b_target->bt_mount,
-"metadata I/O error in \"%s\" at daddr 0x%llx len %d error %d",
- func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
- -bp->b_error);
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
+ xfs_buf_ioerror(bp, -EIO);
+ xfs_buf_ioend(bp);
}
int
@@ -1293,13 +1278,12 @@ xfs_bwrite(
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
- XBF_WRITE_FAIL | XBF_DONE);
+ XBF_DONE);
- error = xfs_buf_submit(bp);
- if (error) {
- xfs_force_shutdown(bp->b_target->bt_mount,
- SHUTDOWN_META_IO_ERROR);
- }
+ xfs_buf_submit(bp);
+ error = xfs_buf_iowait(bp);
+ if (error)
+ xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
return error;
}
@@ -1307,194 +1291,77 @@ static void
xfs_buf_bio_end_io(
struct bio *bio)
{
- struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
-
- /*
- * don't overwrite existing errors - otherwise we can lose errors on
- * buffers that require multiple bios to complete.
- */
- if (bio->bi_status) {
- int error = blk_status_to_errno(bio->bi_status);
-
- cmpxchg(&bp->b_io_error, 0, error);
- }
+ struct xfs_buf *bp = bio->bi_private;
- if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
- invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
-
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
- xfs_buf_ioend_async(bp);
- bio_put(bio);
-}
-
-static void
-xfs_buf_ioapply_map(
- struct xfs_buf *bp,
- int map,
- int *buf_offset,
- int *count,
- int op,
- int op_flags)
-{
- int page_index;
- int total_nr_pages = bp->b_page_count;
- int nr_pages;
- struct bio *bio;
- sector_t sector = bp->b_maps[map].bm_bn;
- int size;
- int offset;
-
- /* skip the pages in the buffer before the start offset */
- page_index = 0;
- offset = *buf_offset;
- while (offset >= PAGE_SIZE) {
- page_index++;
- offset -= PAGE_SIZE;
- }
-
- /*
- * Limit the IO size to the length of the current vector, and update the
- * remaining IO count for the next time around.
- */
- size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
- *count -= size;
- *buf_offset += size;
-
-next_chunk:
- atomic_inc(&bp->b_io_remaining);
- nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
-
- bio = bio_alloc(GFP_NOIO, nr_pages);
- bio_set_dev(bio, bp->b_target->bt_bdev);
- bio->bi_iter.bi_sector = sector;
- bio->bi_end_io = xfs_buf_bio_end_io;
- bio->bi_private = bp;
- bio_set_op_attrs(bio, op, op_flags);
-
- for (; size && nr_pages; nr_pages--, page_index++) {
- int rbytes, nbytes = PAGE_SIZE - offset;
-
- if (nbytes > size)
- nbytes = size;
-
- rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
- offset);
- if (rbytes < nbytes)
- break;
-
- offset = 0;
- sector += BTOBB(nbytes);
- size -= nbytes;
- total_nr_pages--;
- }
+ if (bio->bi_status)
+ xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status));
+ else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
+ XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
+ xfs_buf_ioerror(bp, -EIO);
- if (likely(bio->bi_iter.bi_size)) {
- if (xfs_buf_is_vmapped(bp)) {
- flush_kernel_vmap_range(bp->b_addr,
- xfs_buf_vmap_len(bp));
- }
- submit_bio(bio);
- if (size)
- goto next_chunk;
+ if (bp->b_flags & XBF_ASYNC) {
+ INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+ queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
} else {
- /*
- * This is guaranteed not to be the last io reference count
- * because the caller (xfs_buf_submit) holds a count itself.
- */
- atomic_dec(&bp->b_io_remaining);
- xfs_buf_ioerror(bp, -EIO);
- bio_put(bio);
+ complete(&bp->b_iowait);
}
+ bio_put(bio);
}
-STATIC void
-_xfs_buf_ioapply(
- struct xfs_buf *bp)
+static inline blk_opf_t
+xfs_buf_bio_op(
+ struct xfs_buf *bp)
{
- struct blk_plug plug;
- int op;
- int op_flags = 0;
- int offset;
- int size;
- int i;
-
- /*
- * Make sure we capture only current IO errors rather than stale errors
- * left over from previous use of the buffer (e.g. failed readahead).
- */
- bp->b_error = 0;
-
- /*
- * Initialize the I/O completion workqueue if we haven't yet or the
- * submitter has not opted to specify a custom one.
- */
- if (!bp->b_ioend_wq)
- bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
+ blk_opf_t op;
if (bp->b_flags & XBF_WRITE) {
op = REQ_OP_WRITE;
- if (bp->b_flags & XBF_SYNCIO)
- op_flags = REQ_SYNC;
- if (bp->b_flags & XBF_FUA)
- op_flags |= REQ_FUA;
- if (bp->b_flags & XBF_FLUSH)
- op_flags |= REQ_PREFLUSH;
-
- /*
- * Run the write verifier callback function if it exists. If
- * this function fails it will mark the buffer with an error and
- * the IO should not be dispatched.
- */
- if (bp->b_ops) {
- bp->b_ops->verify_write(bp);
- if (bp->b_error) {
- xfs_force_shutdown(bp->b_target->bt_mount,
- SHUTDOWN_CORRUPT_INCORE);
- return;
- }
- } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
- struct xfs_mount *mp = bp->b_target->bt_mount;
-
- /*
- * non-crc filesystems don't attach verifiers during
- * log recovery, so don't warn for such filesystems.
- */
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- xfs_warn(mp,
- "%s: no buf ops on daddr 0x%llx len %d",
- __func__, bp->b_bn, bp->b_length);
- xfs_hex_dump(bp->b_addr,
- XFS_CORRUPTION_DUMP_LEN);
- dump_stack();
- }
- }
- } else if (bp->b_flags & XBF_READ_AHEAD) {
- op = REQ_OP_READ;
- op_flags = REQ_RAHEAD;
} else {
op = REQ_OP_READ;
+ if (bp->b_flags & XBF_READ_AHEAD)
+ op |= REQ_RAHEAD;
}
- /* we only use the buffer cache for meta-data */
- op_flags |= REQ_META;
+ return op | REQ_META;
+}
+
+static void
+xfs_buf_submit_bio(
+ struct xfs_buf *bp)
+{
+ unsigned int len = BBTOB(bp->b_length);
+ unsigned int nr_vecs = bio_add_max_vecs(bp->b_addr, len);
+ unsigned int map = 0;
+ struct blk_plug plug;
+ struct bio *bio;
+
+ bio = bio_alloc(bp->b_target->bt_bdev, nr_vecs, xfs_buf_bio_op(bp),
+ GFP_NOIO);
+ if (is_vmalloc_addr(bp->b_addr))
+ bio_add_vmalloc(bio, bp->b_addr, len);
+ else
+ bio_add_virt_nofail(bio, bp->b_addr, len);
+ bio->bi_private = bp;
+ bio->bi_end_io = xfs_buf_bio_end_io;
/*
- * Walk all the vectors issuing IO on them. Set up the initial offset
- * into the buffer and the desired IO size before we start -
- * _xfs_buf_ioapply_vec() will modify them appropriately for each
- * subsequent call.
+ * If there is more than one map segment, split out a new bio for each
+ * map except for the last one. The last map is handled by the
+ * remainder of the original bio outside the loop.
*/
- offset = bp->b_offset;
- size = BBTOB(bp->b_io_length);
blk_start_plug(&plug);
- for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
- if (bp->b_error)
- break;
- if (size <= 0)
- break; /* all done */
+ for (map = 0; map < bp->b_map_count - 1; map++) {
+ struct bio *split;
+
+ split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS,
+ &fs_bio_set);
+ split->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
+ bio_chain(split, bio);
+ submit_bio(split);
}
+ bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
+ submit_bio(bio);
blk_finish_plug(&plug);
}
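Worked example of the split loop above: for a discontiguous two-map buffer, map 0 becomes a chained split bio and the remainder of the parent bio services map 1. The sector numbers below are made up; only the carving arithmetic is shown:

#include <stdio.h>

struct map {
	long long	bm_bn;	/* disk address, 512-byte sectors */
	int		bm_len;	/* length in sectors */
};

int main(void)
{
	struct map maps[] = { { 1000, 8 }, { 5000, 8 } };
	int nmaps = 2, i;

	/* every map but the last is split off and chained to the parent */
	for (i = 0; i < nmaps - 1; i++)
		printf("split bio:  sector %lld, %d sectors\n",
		       maps[i].bm_bn, maps[i].bm_len);
	/* the remainder of the parent bio covers the final map */
	printf("parent bio: sector %lld, %d sectors\n",
	       maps[i].bm_bn, maps[i].bm_len);
	return 0;
}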
@@ -1507,142 +1374,123 @@ xfs_buf_iowait(
{
ASSERT(!(bp->b_flags & XBF_ASYNC));
- trace_xfs_buf_iowait(bp, _RET_IP_);
- wait_for_completion(&bp->b_iowait);
- trace_xfs_buf_iowait_done(bp, _RET_IP_);
+ do {
+ trace_xfs_buf_iowait(bp, _RET_IP_);
+ wait_for_completion(&bp->b_iowait);
+ trace_xfs_buf_iowait_done(bp, _RET_IP_);
+ } while (!__xfs_buf_ioend(bp));
return bp->b_error;
}
/*
+ * Run the write verifier callback function if it exists. If this fails, mark
+ * the buffer with an error and do not dispatch the I/O.
+ */
+static bool
+xfs_buf_verify_write(
+ struct xfs_buf *bp)
+{
+ if (bp->b_ops) {
+ bp->b_ops->verify_write(bp);
+ if (bp->b_error)
+ return false;
+ } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
+ /*
+ * Non-crc filesystems don't attach verifiers during log
+ * recovery, so don't warn for such filesystems.
+ */
+ if (xfs_has_crc(bp->b_mount)) {
+ xfs_warn(bp->b_mount,
+ "%s: no buf ops on daddr 0x%llx len %d",
+ __func__, xfs_buf_daddr(bp),
+ bp->b_length);
+ xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN);
+ dump_stack();
+ }
+ }
+
+ return true;
+}
+
+/*
* Buffer I/O submission path, read or write. Asynchronous submission transfers
* the buffer lock ownership and the current reference to the IO. It is not
* safe to reference the buffer after a call to this function unless the caller
* holds an additional reference itself.
*/
-int
-__xfs_buf_submit(
- struct xfs_buf *bp,
- bool wait)
+static void
+xfs_buf_submit(
+ struct xfs_buf *bp)
{
- int error = 0;
-
trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- /* on shutdown we stale and complete the buffer immediately */
- if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
- xfs_buf_ioerror(bp, -EIO);
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- if (bp->b_flags & XBF_ASYNC)
- xfs_buf_ioend(bp);
- return -EIO;
- }
-
/*
- * Grab a reference so the buffer does not go away underneath us. For
- * async buffers, I/O completion drops the callers reference, which
- * could occur before submission returns.
+ * On log shutdown we stale and complete the buffer immediately. We can
+ * be called to read the superblock before the log has been set up, so
+ * be careful checking the log state.
+ *
+ * Checking the mount shutdown state here can result in the log tail
+ * moving inappropriately on disk as the log may not yet be shut down.
+ * i.e. failing this buffer on mount shutdown can remove it from the AIL
+ * and move the tail of the log forwards without having written this
+ * buffer to disk. This corrupts the log tail state in memory, and
+ * because the log may not be shut down yet, it can then be propagated
+ * to disk before the log is shut down. Hence we check log shutdown
+ * state here rather than mount state to avoid corrupting the log tail
+ * on shutdown.
*/
- xfs_buf_hold(bp);
+ if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) {
+ xfs_buf_ioend_fail(bp);
+ return;
+ }
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
- /* clear the internal error state to avoid spurious errors */
- bp->b_io_error = 0;
-
/*
- * Set the count to 1 initially, this will stop an I/O completion
- * callout which happens before we have started all the I/O from calling
- * xfs_buf_ioend too early.
+ * Make sure we capture only current IO errors rather than stale errors
+ * left over from previous use of the buffer (e.g. failed readahead).
*/
- atomic_set(&bp->b_io_remaining, 1);
- if (bp->b_flags & XBF_ASYNC)
- xfs_buf_ioacct_inc(bp);
- _xfs_buf_ioapply(bp);
+ bp->b_error = 0;
- /*
- * If _xfs_buf_ioapply failed, we can get back here with only the IO
- * reference we took above. If we drop it to zero, run completion so
- * that we don't return to the caller with completion still pending.
- */
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
- if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
- xfs_buf_ioend(bp);
- else
- xfs_buf_ioend_async(bp);
+ if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) {
+ xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE);
+ xfs_buf_ioend(bp);
+ return;
}
- if (wait)
- error = xfs_buf_iowait(bp);
-
- /*
- * Release the hold that keeps the buffer referenced for the entire
- * I/O. Note that if the buffer is async, it is not safe to reference
- * after this release.
- */
- xfs_buf_rele(bp);
- return error;
-}
-
-void *
-xfs_buf_offset(
- struct xfs_buf *bp,
- size_t offset)
-{
- struct page *page;
-
- if (bp->b_addr)
- return bp->b_addr + offset;
+ /* In-memory targets are directly mapped, no I/O required. */
+ if (xfs_buftarg_is_mem(bp->b_target)) {
+ xfs_buf_ioend(bp);
+ return;
+ }
- offset += bp->b_offset;
- page = bp->b_pages[offset >> PAGE_SHIFT];
- return page_address(page) + (offset & (PAGE_SIZE-1));
+ xfs_buf_submit_bio(bp);
}
/*
- * Move data into or out of a buffer.
+ * Log a message about and stale a buffer that a caller has decided is corrupt.
+ *
+ * This function should be called for the kinds of metadata corruption that
+ * cannot be detected by a verifier, such as incorrect inter-block relationship
+ * data. Do /not/ call this function from a verifier function.
+ *
+ * The buffer must be XBF_DONE prior to the call. Afterwards, the buffer will
+ * be marked stale, but b_error will not be set. The caller is responsible for
+ * releasing the buffer or fixing it.
*/
void
-xfs_buf_iomove(
- xfs_buf_t *bp, /* buffer to process */
- size_t boff, /* starting buffer offset */
- size_t bsize, /* length to copy */
- void *data, /* data address */
- xfs_buf_rw_t mode) /* read/write/zero flag */
-{
- size_t bend;
-
- bend = boff + bsize;
- while (boff < bend) {
- struct page *page;
- int page_index, page_offset, csize;
-
- page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
- page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
- page = bp->b_pages[page_index];
- csize = min_t(size_t, PAGE_SIZE - page_offset,
- BBTOB(bp->b_io_length) - boff);
-
- ASSERT((csize + page_offset) <= PAGE_SIZE);
-
- switch (mode) {
- case XBRW_ZERO:
- memset(page_address(page) + page_offset, 0, csize);
- break;
- case XBRW_READ:
- memcpy(data, page_address(page) + page_offset, csize);
- break;
- case XBRW_WRITE:
- memcpy(page_address(page) + page_offset, data, csize);
- }
+__xfs_buf_mark_corrupt(
+ struct xfs_buf *bp,
+ xfs_failaddr_t fa)
+{
+ ASSERT(bp->b_flags & XBF_DONE);
- boff += csize;
- data += csize;
- }
+ xfs_buf_corruption_error(bp, fa);
+ xfs_buf_stale(bp);
}
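
The usual entry point is the xfs_buf_mark_corrupt() wrapper, which supplies __this_address as the fail address. A hedged usage sketch, with a hypothetical cross-buffer check of the kind the comment describes (hdr, sibling_bp and out_relse are illustrative names):

	/* Inter-block relationship check no single-buffer verifier can do. */
	if (be32_to_cpu(hdr->next_daddr) != xfs_buf_daddr(sibling_bp)) {
		xfs_buf_mark_corrupt(bp);	/* logs and stales bp */
		error = -EFSCORRUPTED;
		goto out_relse;
	}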
/*
@@ -1655,23 +1503,23 @@ xfs_buf_iomove(
* while freeing all the buffers only held by the LRU.
*/
static enum lru_status
-xfs_buftarg_wait_rele(
+xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
{
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
struct list_head *dispose = arg;
- if (atomic_read(&bp->b_hold) > 1) {
+ if (!spin_trylock(&bp->b_lock))
+ return LRU_SKIP;
+ if (bp->b_hold > 1) {
/* need to wait, so skip it this pass */
- trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+ spin_unlock(&bp->b_lock);
+ trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
return LRU_SKIP;
}
- if (!spin_trylock(&bp->b_lock))
- return LRU_SKIP;
/*
* clear the LRU reference count so the buffer doesn't get
@@ -1684,17 +1532,16 @@ xfs_buftarg_wait_rele(
return LRU_REMOVED;
}
+/*
+ * Wait for outstanding I/O on the buftarg to complete.
+ */
void
-xfs_wait_buftarg(
+xfs_buftarg_wait(
struct xfs_buftarg *btp)
{
- LIST_HEAD(dispose);
- int loop = 0;
-
/*
- * First wait on the buftarg I/O count for all in-flight buffers to be
- * released. This is critical as new buffers do not make the LRU until
- * they are released.
+ * First wait for all in-flight readahead buffers to be released. This is
+ * critical as new buffers do not make the LRU until they are released.
*
* Next, flush the buffer workqueue to ensure all completion processing
* has finished. Just waiting on buffer locks is not sufficient for
@@ -1703,13 +1550,24 @@ xfs_wait_buftarg(
* all reference counts have been dropped before we start walking the
* LRU list.
*/
- while (percpu_counter_sum(&btp->bt_io_count))
+ while (percpu_counter_sum(&btp->bt_readahead_count))
delay(100);
flush_workqueue(btp->bt_mount->m_buf_workqueue);
+}
+
+void
+xfs_buftarg_drain(
+ struct xfs_buftarg *btp)
+{
+ LIST_HEAD(dispose);
+ int loop = 0;
+ bool write_fail = false;
+
+ xfs_buftarg_wait(btp);
/* loop until there is nothing left on the lru list. */
while (list_lru_count(&btp->bt_lru)) {
- list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+ list_lru_walk(&btp->bt_lru, xfs_buftarg_drain_rele,
&dispose, LONG_MAX);
while (!list_empty(&dispose)) {
@@ -1717,24 +1575,35 @@ xfs_wait_buftarg(
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
- xfs_alert(btp->bt_mount,
+ write_fail = true;
+ xfs_buf_alert_ratelimited(bp,
+ "XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
- (long long)bp->b_bn);
- xfs_alert(btp->bt_mount,
-"Please run xfs_repair to determine the extent of the problem.");
+ (long long)xfs_buf_daddr(bp));
}
xfs_buf_rele(bp);
}
if (loop++ != 0)
delay(100);
}
+
+ /*
+ * If one or more failed buffers were freed, that means dirty metadata
+ * was thrown away. This should only ever happen after I/O completion
+ * handling has elevated I/O error(s) to permanent failures and shuts
+ * down the journal.
+ */
+ if (write_fail) {
+ ASSERT(xlog_is_shutdown(btp->bt_mount->m_log));
+ xfs_alert(btp->bt_mount,
+ "Please run xfs_repair to determine the extent of the problem.");
+ }
}
static enum lru_status
xfs_buftarg_isolate(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
{
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
@@ -1767,8 +1636,7 @@ xfs_buftarg_shrink_scan(
struct shrinker *shrink,
struct shrink_control *sc)
{
- struct xfs_buftarg *btp = container_of(shrink,
- struct xfs_buftarg, bt_shrinker);
+ struct xfs_buftarg *btp = shrink->private_data;
LIST_HEAD(dispose);
unsigned long freed;
@@ -1790,100 +1658,189 @@ xfs_buftarg_shrink_count(
struct shrinker *shrink,
struct shrink_control *sc)
{
- struct xfs_buftarg *btp = container_of(shrink,
- struct xfs_buftarg, bt_shrinker);
+ struct xfs_buftarg *btp = shrink->private_data;
return list_lru_shrink_count(&btp->bt_lru, sc);
}
void
-xfs_free_buftarg(
+xfs_destroy_buftarg(
struct xfs_buftarg *btp)
{
- unregister_shrinker(&btp->bt_shrinker);
- ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
- percpu_counter_destroy(&btp->bt_io_count);
+ shrinker_free(btp->bt_shrinker);
+ ASSERT(percpu_counter_sum(&btp->bt_readahead_count) == 0);
+ percpu_counter_destroy(&btp->bt_readahead_count);
list_lru_destroy(&btp->bt_lru);
+}
+
+void
+xfs_free_buftarg(
+ struct xfs_buftarg *btp)
+{
+ xfs_destroy_buftarg(btp);
+ fs_put_dax(btp->bt_daxdev, btp->bt_mount);
+ /* the main block device is closed by kill_block_super */
+ if (btp->bt_bdev != btp->bt_mount->m_super->s_bdev)
+ bdev_fput(btp->bt_file);
+ kfree(btp);
+}
+
+/*
+ * Configure this buffer target for hardware-assisted atomic writes if the
+ * atomic write geometry the underlying block device supports is congruent
+ * with the filesystem geometry.
+ */
+static inline void
+xfs_configure_buftarg_atomic_writes(
+ struct xfs_buftarg *btp)
+{
+ struct xfs_mount *mp = btp->bt_mount;
+ unsigned int min_bytes, max_bytes;
- xfs_blkdev_issue_flush(btp);
+ min_bytes = bdev_atomic_write_unit_min_bytes(btp->bt_bdev);
+ max_bytes = bdev_atomic_write_unit_max_bytes(btp->bt_bdev);
- kmem_free(btp);
+ /*
+ * Ignore atomic write geometry that is nonsense or doesn't even cover
+ * a single fsblock.
+ */
+ if (min_bytes > max_bytes ||
+ min_bytes > mp->m_sb.sb_blocksize ||
+ max_bytes < mp->m_sb.sb_blocksize) {
+ min_bytes = 0;
+ max_bytes = 0;
+ }
+
+ btp->bt_awu_min = min_bytes;
+ btp->bt_awu_max = max_bytes;
}
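
Worked example, assuming a 4096-byte filesystem block size: a device advertising atomic write units of min 4096 / max 65536 bytes passes all three checks, so bt_awu_min/bt_awu_max are set to those values. A device advertising a 2048-byte maximum trips the max_bytes < sb_blocksize test, both fields are left at zero, and hardware atomic writes stay disabled on this target.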
+/* Configure a buffer target that abstracts a block device. */
int
-xfs_setsize_buftarg(
- xfs_buftarg_t *btp,
- unsigned int sectorsize)
+xfs_configure_buftarg(
+ struct xfs_buftarg *btp,
+ unsigned int sectorsize,
+ xfs_rfsblock_t nr_blocks)
{
- /* Set up metadata sector size info */
+ struct xfs_mount *mp = btp->bt_mount;
+
+ if (btp->bt_bdev) {
+ int error;
+
+ error = bdev_validate_blocksize(btp->bt_bdev, sectorsize);
+ if (error) {
+ xfs_warn(mp,
+ "Cannot use blocksize %u on device %pg, err %d",
+ sectorsize, btp->bt_bdev, error);
+ return -EINVAL;
+ }
+
+ if (bdev_can_atomic_write(btp->bt_bdev))
+ xfs_configure_buftarg_atomic_writes(btp);
+ }
+
btp->bt_meta_sectorsize = sectorsize;
btp->bt_meta_sectormask = sectorsize - 1;
+ /* m_blkbb_log is not set up yet */
+ btp->bt_nr_sectors = nr_blocks << (mp->m_sb.sb_blocklog - BBSHIFT);
+ return 0;
+}
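
Worked example for the bt_nr_sectors conversion: with 4096-byte blocks, sb_blocklog is 12 and BBSHIFT is 9, so each filesystem block spans 1 << (12 - 9) = 8 basic 512-byte sectors; nr_blocks = 1048576 therefore yields bt_nr_sectors = 8388608.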
- if (set_blocksize(btp->bt_bdev, sectorsize)) {
- xfs_warn(btp->bt_mount,
- "Cannot set_blocksize to %u on device %pg",
- sectorsize, btp->bt_bdev);
- return -EINVAL;
- }
+int
+xfs_init_buftarg(
+ struct xfs_buftarg *btp,
+ size_t logical_sectorsize,
+ const char *descr)
+{
+ /* The maximum size of the buftarg is only known once the sb is read. */
+ btp->bt_nr_sectors = XFS_BUF_DADDR_MAX;
/* Set up device logical sector size mask */
- btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
- btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;
+ btp->bt_logical_sectorsize = logical_sectorsize;
+ btp->bt_logical_sectormask = logical_sectorsize - 1;
+ /*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages
+ * per 30 seconds so as not to spam the logs on repeated errors.
+ */
+ ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
+ DEFAULT_RATELIMIT_BURST);
+
+ if (list_lru_init(&btp->bt_lru))
+ return -ENOMEM;
+ if (percpu_counter_init(&btp->bt_readahead_count, 0, GFP_KERNEL))
+ goto out_destroy_lru;
+
+ btp->bt_shrinker =
+ shrinker_alloc(SHRINKER_NUMA_AWARE, "xfs-buf:%s", descr);
+ if (!btp->bt_shrinker)
+ goto out_destroy_io_count;
+ btp->bt_shrinker->count_objects = xfs_buftarg_shrink_count;
+ btp->bt_shrinker->scan_objects = xfs_buftarg_shrink_scan;
+ btp->bt_shrinker->private_data = btp;
+ shrinker_register(btp->bt_shrinker);
return 0;
-}
-/*
- * When allocating the initial buffer target we have not yet
- * read in the superblock, so don't know what sized sectors
- * are being used at this early stage. Play safe.
- */
-STATIC int
-xfs_setsize_buftarg_early(
- xfs_buftarg_t *btp,
- struct block_device *bdev)
-{
- return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
+out_destroy_io_count:
+ percpu_counter_destroy(&btp->bt_readahead_count);
+out_destroy_lru:
+ list_lru_destroy(&btp->bt_lru);
+ return -ENOMEM;
}
-xfs_buftarg_t *
+struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
- struct block_device *bdev,
- struct dax_device *dax_dev)
+ struct file *bdev_file)
{
- xfs_buftarg_t *btp;
+ struct xfs_buftarg *btp;
+ const struct dax_holder_operations *ops = NULL;
+ int error;
+
- btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
+#if defined(CONFIG_FS_DAX) && defined(CONFIG_MEMORY_FAILURE)
+ ops = &xfs_dax_holder_operations;
+#endif
+ btp = kzalloc(sizeof(*btp), GFP_KERNEL | __GFP_NOFAIL);
btp->bt_mount = mp;
- btp->bt_dev = bdev->bd_dev;
- btp->bt_bdev = bdev;
- btp->bt_daxdev = dax_dev;
+ btp->bt_file = bdev_file;
+ btp->bt_bdev = file_bdev(bdev_file);
+ btp->bt_dev = btp->bt_bdev->bd_dev;
+ btp->bt_daxdev = fs_dax_get_by_bdev(btp->bt_bdev, &btp->bt_dax_part_off,
+ mp, ops);
- if (xfs_setsize_buftarg_early(btp, bdev))
+ /*
+ * Flush and invalidate all devices' pagecaches before reading any
+ * metadata because XFS doesn't use the bdev pagecache.
+ */
+ error = sync_blockdev(btp->bt_bdev);
+ if (error)
goto error_free;
- if (list_lru_init(&btp->bt_lru))
- goto error_free;
+ /*
+ * When allocating the buftargs we have not yet read the superblock and
+ * thus do not know the filesystem sector size.
+ */
+ btp->bt_meta_sectorsize = bdev_logical_block_size(btp->bt_bdev);
+ btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;
- if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
- goto error_lru;
+ error = xfs_init_buftarg(btp, btp->bt_meta_sectorsize,
+ mp->m_super->s_id);
+ if (error)
+ goto error_free;
- btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
- btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
- btp->bt_shrinker.seeks = DEFAULT_SEEKS;
- btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
- if (register_shrinker(&btp->bt_shrinker))
- goto error_pcpu;
return btp;
-error_pcpu:
- percpu_counter_destroy(&btp->bt_io_count);
-error_lru:
- list_lru_destroy(&btp->bt_lru);
error_free:
- kmem_free(btp);
- return NULL;
+ kfree(btp);
+ return ERR_PTR(error);
+}
+
+static inline void
+xfs_buf_list_del(
+ struct xfs_buf *bp)
+{
+ list_del_init(&bp->b_list);
+ wake_up_var(&bp->b_list);
}
/*
@@ -1903,7 +1860,7 @@ xfs_buf_delwri_cancel(
xfs_buf_lock(bp);
bp->b_flags &= ~_XBF_DELWRI_Q;
- list_del_init(&bp->b_list);
+ xfs_buf_list_del(bp);
xfs_buf_relse(bp);
}
}
@@ -1949,7 +1906,7 @@ xfs_buf_delwri_queue(
*/
bp->b_flags |= _XBF_DELWRI_Q;
if (list_empty(&bp->b_list)) {
- atomic_inc(&bp->b_hold);
+ xfs_buf_hold(bp);
list_add_tail(&bp->b_list, list);
}
@@ -1957,15 +1914,43 @@ xfs_buf_delwri_queue(
}
/*
+ * Queue a buffer to this delwri list as part of a data integrity operation.
+ * If the buffer is on any other delwri list, we'll wait for that to clear
+ * so that the caller can submit the buffer for IO and wait for the result.
+ * Callers must ensure the buffer is not already on the list.
+ */
+void
+xfs_buf_delwri_queue_here(
+ struct xfs_buf *bp,
+ struct list_head *buffer_list)
+{
+ /*
+ * We need this buffer to end up on the /caller's/ delwri list, not any
+ * old list. This can happen if the buffer is marked stale (which
+ * clears DELWRI_Q) after the AIL queues the buffer to its list but
+ * before the AIL has a chance to submit the list.
+ */
+ while (!list_empty(&bp->b_list)) {
+ xfs_buf_unlock(bp);
+ wait_var_event(&bp->b_list, list_empty(&bp->b_list));
+ xfs_buf_lock(bp);
+ }
+
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+
+ xfs_buf_delwri_queue(bp, buffer_list);
+}
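
The wait here pairs with the wake_up_var() in xfs_buf_list_del() above: wait_var_event() sleeps on the address of b_list and re-evaluates list_empty() after every wakeup. The primitive in isolation, as a minimal sketch with hypothetical names:

	/* Waiter: sleep until the condition keyed by the address holds. */
	wait_var_event(&obj->busy, !READ_ONCE(obj->busy));

	/* Waker: change the state, then wake all sleepers on that address. */
	WRITE_ONCE(obj->busy, false);
	wake_up_var(&obj->busy);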
+
+/*
* Compare function is more complex than it needs to be because
* the return value is only 32 bits and we are doing comparisons
* on 64 bit values
*/
static int
xfs_buf_cmp(
- void *priv,
- struct list_head *a,
- struct list_head *b)
+ void *priv,
+ const struct list_head *a,
+ const struct list_head *b)
{
struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
@@ -1979,71 +1964,26 @@ xfs_buf_cmp(
return 0;
}
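
To see why the comparison cannot simply return the 64-bit difference, consider assumed daddrs of 0 and 1ULL << 32: the subtraction's low 32 bits are zero, so a naive cast-to-int comparator reports equality and the sort order goes wrong:

	xfs_daddr_t	a = 0;
	xfs_daddr_t	b = 1ULL << 32;

	int		broken = (int)(a - b);			/* 0: "equal" */
	int		ok = (a < b) ? -1 : (a > b) ? 1 : 0;	/* -1 */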
-/*
- * Submit buffers for write. If wait_list is specified, the buffers are
- * submitted using sync I/O and placed on the wait list such that the caller can
- * iowait each buffer. Otherwise async I/O is used and the buffers are released
- * at I/O completion time. In either case, buffers remain locked until I/O
- * completes and the buffer is released from the queue.
- */
-static int
-xfs_buf_delwri_submit_buffers(
- struct list_head *buffer_list,
- struct list_head *wait_list)
+static bool
+xfs_buf_delwri_submit_prep(
+ struct xfs_buf *bp)
{
- struct xfs_buf *bp, *n;
- int pinned = 0;
- struct blk_plug plug;
-
- list_sort(NULL, buffer_list, xfs_buf_cmp);
-
- blk_start_plug(&plug);
- list_for_each_entry_safe(bp, n, buffer_list, b_list) {
- if (!wait_list) {
- if (xfs_buf_ispinned(bp)) {
- pinned++;
- continue;
- }
- if (!xfs_buf_trylock(bp))
- continue;
- } else {
- xfs_buf_lock(bp);
- }
-
- /*
- * Someone else might have written the buffer synchronously or
- * marked it stale in the meantime. In that case only the
- * _XBF_DELWRI_Q flag got cleared, and we have to drop the
- * reference and remove it from the list here.
- */
- if (!(bp->b_flags & _XBF_DELWRI_Q)) {
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- continue;
- }
-
- trace_xfs_buf_delwri_split(bp, _RET_IP_);
-
- /*
- * If we have a wait list, each buffer (and associated delwri
- * queue reference) transfers to it and is submitted
- * synchronously. Otherwise, drop the buffer from the delwri
- * queue and submit async.
- */
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE;
- if (wait_list) {
- bp->b_flags &= ~XBF_ASYNC;
- list_move_tail(&bp->b_list, wait_list);
- } else {
- bp->b_flags |= XBF_ASYNC;
- list_del_init(&bp->b_list);
- }
- __xfs_buf_submit(bp, false);
+ /*
+ * Someone else might have written the buffer synchronously or marked it
+ * stale in the meantime. In that case only the _XBF_DELWRI_Q flag got
+ * cleared, and we have to drop the reference and remove it from the
+ * list here.
+ */
+ if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+ xfs_buf_list_del(bp);
+ xfs_buf_relse(bp);
+ return false;
}
- blk_finish_plug(&plug);
- return pinned;
+ trace_xfs_buf_delwri_split(bp, _RET_IP_);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ bp->b_flags |= XBF_WRITE;
+ return true;
}
/*
@@ -2066,7 +2006,30 @@ int
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
- return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
+ struct xfs_buf *bp, *n;
+ int pinned = 0;
+ struct blk_plug plug;
+
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ if (!xfs_buf_trylock(bp))
+ continue;
+ if (xfs_buf_ispinned(bp)) {
+ xfs_buf_unlock(bp);
+ pinned++;
+ continue;
+ }
+ if (!xfs_buf_delwri_submit_prep(bp))
+ continue;
+ bp->b_flags |= XBF_ASYNC;
+ xfs_buf_list_del(bp);
+ xfs_buf_submit(bp);
+ }
+ blk_finish_plug(&plug);
+
+ return pinned;
}
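
Typical caller pattern for the delwri interfaces, condensed from how XFS stages writeback elsewhere; bp1/bp2 are placeholders and error handling is trimmed:

	LIST_HEAD(buffer_list);
	int		error;

	/* Each queue call takes a hold and sets _XBF_DELWRI_Q. */
	xfs_buf_delwri_queue(bp1, &buffer_list);
	xfs_buf_delwri_queue(bp2, &buffer_list);

	/* Sorted, plugged, synchronous writeback; first error is returned. */
	error = xfs_buf_delwri_submit(&buffer_list);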
/*
@@ -2083,15 +2046,27 @@ xfs_buf_delwri_submit(
{
LIST_HEAD (wait_list);
int error = 0, error2;
- struct xfs_buf *bp;
+ struct xfs_buf *bp, *n;
+ struct blk_plug plug;
- xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
+ list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+ xfs_buf_lock(bp);
+ if (!xfs_buf_delwri_submit_prep(bp))
+ continue;
+ bp->b_flags &= ~XBF_ASYNC;
+ list_move_tail(&bp->b_list, &wait_list);
+ xfs_buf_submit(bp);
+ }
+ blk_finish_plug(&plug);
/* Wait for IO to complete. */
while (!list_empty(&wait_list)) {
bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
+ xfs_buf_list_del(bp);
/*
* Wait on the locked buffer, check for errors and unlock and
@@ -2106,91 +2081,52 @@ xfs_buf_delwri_submit(
return error;
}
-/*
- * Push a single buffer on a delwri queue.
- *
- * The purpose of this function is to submit a single buffer of a delwri queue
- * and return with the buffer still on the original queue. The waiting delwri
- * buffer submission infrastructure guarantees transfer of the delwri queue
- * buffer reference to a temporary wait list. We reuse this infrastructure to
- * transfer the buffer back to the original queue.
- *
- * Note the buffer transitions from the queued state, to the submitted and wait
- * listed state and back to the queued state during this call. The buffer
- * locking and queue management logic between _delwri_pushbuf() and
- * _delwri_queue() guarantee that the buffer cannot be queued to another list
- * before returning.
- */
-int
-xfs_buf_delwri_pushbuf(
- struct xfs_buf *bp,
- struct list_head *buffer_list)
+void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
- LIST_HEAD (submit_list);
- int error;
-
- ASSERT(bp->b_flags & _XBF_DELWRI_Q);
-
- trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
-
/*
- * Isolate the buffer to a new local list so we can submit it for I/O
- * independently from the rest of the original list.
- */
- xfs_buf_lock(bp);
- list_move(&bp->b_list, &submit_list);
- xfs_buf_unlock(bp);
-
- /*
- * Delwri submission clears the DELWRI_Q buffer flag and returns with
- * the buffer on the wait list with the original reference. Rather than
- * bounce the buffer from a local wait list back to the original list
- * after I/O completion, reuse the original list as the wait list.
- */
- xfs_buf_delwri_submit_buffers(&submit_list, buffer_list);
-
- /*
- * The buffer is now locked, under I/O and wait listed on the original
- * delwri queue. Wait for I/O completion, restore the DELWRI_Q flag and
- * return with the buffer unlocked and on the original queue.
+ * Set the lru reference count to 0 based on the error injection tag.
+ * This allows userspace to disrupt buffer caching for debug/testing
+ * purposes.
*/
- error = xfs_buf_iowait(bp);
- bp->b_flags |= _XBF_DELWRI_Q;
- xfs_buf_unlock(bp);
+ if (XFS_TEST_ERROR(bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
+ lru_ref = 0;
- return error;
+ atomic_set(&bp->b_lru_ref, lru_ref);
}
-int __init
-xfs_buf_init(void)
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic(
+ struct xfs_buf *bp,
+ __be32 dmagic)
{
- xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
- KM_ZONE_HWALIGN, NULL);
- if (!xfs_buf_zone)
- goto out;
+ struct xfs_mount *mp = bp->b_mount;
+ int idx;
- return 0;
-
- out:
- return -ENOMEM;
-}
-
-void
-xfs_buf_terminate(void)
-{
- kmem_zone_destroy(xfs_buf_zone);
+ idx = xfs_has_crc(mp);
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
+ return false;
+ return dmagic == bp->b_ops->magic[idx];
}
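
Since idx = xfs_has_crc(mp), slot 0 of the magic array holds the pre-CRC (V4) magic and slot 1 the CRC (V5) magic. A sketch of an ops table and read verifier consuming it; the foo names are hypothetical stand-ins for the pattern the real btree verifiers use:

	static void
	xfs_foo_verify_read(
		struct xfs_buf		*bp)
	{
		struct xfs_foo_hdr	*hdr = bp->b_addr;	/* assumed layout */

		/* hdr->magic is disk byte order, as xfs_verify_magic() expects. */
		if (!xfs_verify_magic(bp, hdr->magic))
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
	}

	static const struct xfs_buf_ops xfs_foo_buf_ops = {
		.name		= "xfs_foo",
		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
		.verify_read	= xfs_foo_verify_read,
	};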
-
-void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
+/*
+ * Verify an on-disk magic value against the magic value specified in the
+ * verifier structure. The verifier magic is in disk byte order so the caller is
+ * expected to pass the value directly from disk.
+ */
+bool
+xfs_verify_magic16(
+ struct xfs_buf *bp,
+ __be16 dmagic)
{
- /*
- * Set the lru reference count to 0 based on the error injection tag.
- * This allows userspace to disrupt buffer caching for debug/testing
- * purposes.
- */
- if (XFS_TEST_ERROR(false, bp->b_target->bt_mount,
- XFS_ERRTAG_BUF_LRU_REF))
- lru_ref = 0;
+ struct xfs_mount *mp = bp->b_mount;
+ int idx;
- atomic_set(&bp->b_lru_ref, lru_ref);
+ idx = xfs_has_crc(mp);
+ if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
+ return false;
+ return dmagic == bp->b_ops->magic16[idx];
}