Diffstat (limited to 'fs/xfs/scrub/attr.c')
-rw-r--r--  fs/xfs/scrub/attr.c | 491
1 file changed, 358 insertions(+), 133 deletions(-)
diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c
index 81d5e90547a1..708334f9b2bd 100644
--- a/fs/xfs/scrub/attr.c
+++ b/fs/xfs/scrub/attr.c
@@ -1,7 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Copyright (C) 2017 Oracle. All Rights Reserved.
- * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
+ * Author: Darrick J. Wong <djwong@kernel.org>
*/
#include "xfs.h"
#include "xfs_fs.h"
@@ -9,113 +9,251 @@
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
-#include "xfs_defer.h"
-#include "xfs_btree.h"
-#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
-#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
-#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
-#include "scrub/xfs_scrub.h"
+#include "xfs_attr_sf.h"
+#include "xfs_parent.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/dabtree.h"
-#include "scrub/trace.h"
+#include "scrub/attr.h"
+#include "scrub/listxattr.h"
+#include "scrub/repair.h"
-#include <linux/posix_acl_xattr.h>
-#include <linux/xattr.h>
+/* Free the buffers linked from the xattr buffer. */
+static void
+xchk_xattr_buf_cleanup(
+ void *priv)
+{
+ struct xchk_xattr_buf *ab = priv;
+
+ kvfree(ab->freemap);
+ ab->freemap = NULL;
+ kvfree(ab->usedmap);
+ ab->usedmap = NULL;
+ kvfree(ab->value);
+ ab->value = NULL;
+ ab->value_sz = 0;
+ kvfree(ab->name);
+ ab->name = NULL;
+}
+
+/*
+ * Allocate the free space bitmap if we're trying harder; there are leaf blocks
+ * in the attr fork; or we can't tell if there are leaf blocks.
+ */
+static inline bool
+xchk_xattr_want_freemap(
+ struct xfs_scrub *sc)
+{
+ struct xfs_ifork *ifp;
+
+ if (sc->flags & XCHK_TRY_HARDER)
+ return true;
+
+ if (!sc->ip)
+ return true;
+
+ ifp = xfs_ifork_ptr(sc->ip, XFS_ATTR_FORK);
+ if (!ifp)
+ return false;
+
+ return xfs_ifork_has_extents(ifp);
+}
+
+/*
+ * Allocate enough memory to hold an attr value and attr block bitmaps,
+ * reallocating the buffer if necessary. Buffer contents are not preserved
+ * across a reallocation.
+ */
+int
+xchk_setup_xattr_buf(
+ struct xfs_scrub *sc,
+ size_t value_size)
+{
+ size_t bmp_sz;
+ struct xchk_xattr_buf *ab = sc->buf;
+ void *new_val;
+
+ bmp_sz = sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
+
+ if (ab)
+ goto resize_value;
+
+ ab = kvzalloc(sizeof(struct xchk_xattr_buf), XCHK_GFP_FLAGS);
+ if (!ab)
+ return -ENOMEM;
+ sc->buf = ab;
+ sc->buf_cleanup = xchk_xattr_buf_cleanup;
+
+ ab->usedmap = kvmalloc(bmp_sz, XCHK_GFP_FLAGS);
+ if (!ab->usedmap)
+ return -ENOMEM;
+
+ if (xchk_xattr_want_freemap(sc)) {
+ ab->freemap = kvmalloc(bmp_sz, XCHK_GFP_FLAGS);
+ if (!ab->freemap)
+ return -ENOMEM;
+ }
+
+ if (xchk_could_repair(sc)) {
+ ab->name = kvmalloc(XATTR_NAME_MAX + 1, XCHK_GFP_FLAGS);
+ if (!ab->name)
+ return -ENOMEM;
+ }
+
+resize_value:
+ if (ab->value_sz >= value_size)
+ return 0;
+
+ if (ab->value) {
+ kvfree(ab->value);
+ ab->value = NULL;
+ ab->value_sz = 0;
+ }
+
+ new_val = kvmalloc(value_size, XCHK_GFP_FLAGS);
+ if (!new_val)
+ return -ENOMEM;
+
+ ab->value = new_val;
+ ab->value_sz = value_size;
+ return 0;
+}
/* Set us up to scrub an inode's extended attributes. */
int
xchk_setup_xattr(
- struct xfs_scrub *sc,
- struct xfs_inode *ip)
+ struct xfs_scrub *sc)
{
- size_t sz;
+ int error;
+
+ if (xchk_could_repair(sc)) {
+ error = xrep_setup_xattr(sc);
+ if (error)
+ return error;
+ }
/*
- * Allocate the buffer without the inode lock held. We need enough
- * space to read every xattr value in the file or enough space to
- * hold three copies of the xattr free space bitmap. (Not both at
- * the same time.)
+ * We failed to get memory while checking attrs, so this time try to
+ * get all the memory we're ever going to need. Allocate the buffer
+ * without the inode lock held, which means we can sleep.
*/
- sz = max_t(size_t, XATTR_SIZE_MAX, 3 * sizeof(long) *
- BITS_TO_LONGS(sc->mp->m_attr_geo->blksize));
- sc->buf = kmem_zalloc_large(sz, KM_SLEEP);
- if (!sc->buf)
- return -ENOMEM;
+ if (sc->flags & XCHK_TRY_HARDER) {
+ error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX);
+ if (error)
+ return error;
+ }
- return xchk_setup_inode_contents(sc, ip, 0);
+ return xchk_setup_inode_contents(sc, 0);
}
/* Extended Attributes */
-struct xchk_xattr {
- struct xfs_attr_list_context context;
- struct xfs_scrub *sc;
-};
-
/*
* Check that an extended attribute key can be looked up by hash.
*
- * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
- * to call this function for every attribute key in an inode. Once
- * we're here, we load the attribute value to see if any errors happen,
- * or if we get more or less data than we expected.
+ * We use the extended attribute walk helper to call this function for every
+ * attribute key in an inode. Once we're here, we load the attribute value to
+ * see if any errors happen, or if we get more or less data than we expected.
*/
-static void
-xchk_xattr_listent(
- struct xfs_attr_list_context *context,
- int flags,
- unsigned char *name,
- int namelen,
- int valuelen)
+static int
+xchk_xattr_actor(
+ struct xfs_scrub *sc,
+ struct xfs_inode *ip,
+ unsigned int attr_flags,
+ const unsigned char *name,
+ unsigned int namelen,
+ const void *value,
+ unsigned int valuelen,
+ void *priv)
{
- struct xchk_xattr *sx;
- struct xfs_da_args args = { NULL };
+ struct xfs_da_args args = {
+ .attr_filter = attr_flags & XFS_ATTR_NSP_ONDISK_MASK,
+ .geo = sc->mp->m_attr_geo,
+ .whichfork = XFS_ATTR_FORK,
+ .dp = ip,
+ .name = name,
+ .namelen = namelen,
+ .trans = sc->tp,
+ .valuelen = valuelen,
+ .owner = ip->i_ino,
+ };
+ struct xchk_xattr_buf *ab;
int error = 0;
- sx = container_of(context, struct xchk_xattr, context);
+ ab = sc->buf;
- if (flags & XFS_ATTR_INCOMPLETE) {
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ if (attr_flags & ~XFS_ATTR_ONDISK_MASK) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
+ return -ECANCELED;
+ }
+
+ if (attr_flags & XFS_ATTR_INCOMPLETE) {
/* Incomplete attr key, just mark the inode for preening. */
- xchk_ino_set_preen(sx->sc, context->dp->i_ino);
- return;
+ xchk_ino_set_preen(sc, ip->i_ino);
+ return 0;
+ }
+
+ /* Does this name make sense? */
+ if (!xfs_attr_namecheck(attr_flags, name, namelen)) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
+ return -ECANCELED;
+ }
+
+ /* Check parent pointer record. */
+ if ((attr_flags & XFS_ATTR_PARENT) &&
+ !xfs_parent_valuecheck(sc->mp, value, valuelen)) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
+ return -ECANCELED;
}
- args.flags = ATTR_KERNOTIME;
- if (flags & XFS_ATTR_ROOT)
- args.flags |= ATTR_ROOT;
- else if (flags & XFS_ATTR_SECURE)
- args.flags |= ATTR_SECURE;
- args.geo = context->dp->i_mount->m_attr_geo;
- args.whichfork = XFS_ATTR_FORK;
- args.dp = context->dp;
- args.name = name;
- args.namelen = namelen;
- args.hashval = xfs_da_hashname(args.name, args.namelen);
- args.trans = context->tp;
- args.value = sx->sc->buf;
- args.valuelen = XATTR_SIZE_MAX;
-
- error = xfs_attr_get_ilocked(context->dp, &args);
- if (error == -EEXIST)
- error = 0;
- if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
+ /*
+ * Try to allocate enough memory to extract the attr value. If that
+ * doesn't work, return -EDEADLOCK as a signal to try again with a
+ * maximally sized buffer.
+ */
+ error = xchk_setup_xattr_buf(sc, valuelen);
+ if (error == -ENOMEM)
+ error = -EDEADLOCK;
+ if (error)
+ return error;
+
+ /*
+ * Parent pointers are matched on attr name and value, so we must
+ * supply the xfs_parent_rec here when confirming that the dabtree
+ * indexing works correctly.
+ */
+ if (attr_flags & XFS_ATTR_PARENT)
+ memcpy(ab->value, value, valuelen);
+
+ args.value = ab->value;
+
+ /*
+ * Get the attr value to ensure that lookup can find this attribute
+ * through the dabtree indexing and that remote value retrieval also
+ * works correctly.
+ */
+ xfs_attr_sethash(&args);
+ error = xfs_attr_get_ilocked(&args);
+ /* ENODATA means the hash lookup failed and the attr is bad */
+ if (error == -ENODATA)
+ error = -EFSCORRUPTED;
+ if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, args.blkno,
&error))
- goto fail_xref;
+ return error;
if (args.valuelen != valuelen)
- xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
- args.blkno);
-fail_xref:
- if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- context->seen_enough = 1;
- return;
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, args.blkno);
+
+ return 0;
}
/*
@@ -125,7 +263,7 @@ fail_xref:
* Within a char, the lowest bit of the char represents the byte with
* the smallest address
*/
-STATIC bool
+bool
xchk_xattr_set_map(
struct xfs_scrub *sc,
unsigned long *map,
@@ -156,27 +294,23 @@ xchk_xattr_set_map(
STATIC bool
xchk_xattr_check_freemap(
struct xfs_scrub *sc,
- unsigned long *map,
struct xfs_attr3_icleaf_hdr *leafhdr)
{
- unsigned long *freemap;
- unsigned long *dstmap;
+ struct xchk_xattr_buf *ab = sc->buf;
unsigned int mapsize = sc->mp->m_attr_geo->blksize;
int i;
/* Construct bitmap of freemap contents. */
- freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
- bitmap_zero(freemap, mapsize);
+ bitmap_zero(ab->freemap, mapsize);
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
- if (!xchk_xattr_set_map(sc, freemap,
+ if (!xchk_xattr_set_map(sc, ab->freemap,
leafhdr->freemap[i].base,
leafhdr->freemap[i].size))
return false;
}
/* Look for bits that are set in freemap and are marked in use. */
- dstmap = freemap + BITS_TO_LONGS(mapsize);
- return bitmap_and(dstmap, freemap, map, mapsize) == 0;
+ return !bitmap_intersects(ab->freemap, ab->usedmap, mapsize);
}
/*
@@ -190,13 +324,13 @@ xchk_xattr_entry(
char *buf_end,
struct xfs_attr_leafblock *leaf,
struct xfs_attr3_icleaf_hdr *leafhdr,
- unsigned long *usedmap,
struct xfs_attr_leaf_entry *ent,
int idx,
unsigned int *usedbytes,
__u32 *last_hashval)
{
struct xfs_mount *mp = ds->state->mp;
+ struct xchk_xattr_buf *ab = ds->sc->buf;
char *name_end;
struct xfs_attr_leaf_name_local *lentry;
struct xfs_attr_leaf_name_remote *rentry;
@@ -236,7 +370,7 @@ xchk_xattr_entry(
if (name_end > buf_end)
xchk_da_set_corrupt(ds, level);
- if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
+ if (!xchk_xattr_set_map(ds->sc, ab->usedmap, nameidx, namesize))
xchk_da_set_corrupt(ds, level);
if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
*usedbytes += namesize;
@@ -256,7 +390,7 @@ xchk_xattr_block(
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *ent;
struct xfs_attr_leaf_entry *entries;
- unsigned long *usedmap = ds->sc->buf;
+ struct xchk_xattr_buf *ab = ds->sc->buf;
char *buf_end;
size_t off;
__u32 last_hashval = 0;
@@ -266,15 +400,16 @@ xchk_xattr_block(
if (*last_checked == blk->blkno)
return 0;
+
*last_checked = blk->blkno;
- bitmap_zero(usedmap, mp->m_attr_geo->blksize);
+ bitmap_zero(ab->usedmap, mp->m_attr_geo->blksize);
/* Check all the padding. */
- if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
- struct xfs_attr3_leafblock *leaf = bp->b_addr;
+ if (xfs_has_crc(ds->sc->mp)) {
+ struct xfs_attr3_leafblock *leaf3 = bp->b_addr;
- if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
- leaf->hdr.info.hdr.pad != 0)
+ if (leaf3->hdr.pad1 != 0 || leaf3->hdr.pad2 != 0 ||
+ leaf3->hdr.info.hdr.pad != 0)
xchk_da_set_corrupt(ds, level);
} else {
if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
@@ -285,14 +420,27 @@ xchk_xattr_block(
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
hdrsize = xfs_attr3_leaf_hdr_size(leaf);
+ /*
+ * Empty xattr leaf blocks mapped at block 0 are probably a byproduct
+ * of a race between setxattr and a log shutdown. Anywhere else in the
+ * attr fork is a corruption.
+ */
+ if (leafhdr.count == 0) {
+ if (blk->blkno == 0)
+ xchk_da_set_preen(ds, level);
+ else
+ xchk_da_set_corrupt(ds, level);
+ }
if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused > mp->m_attr_geo->blksize)
xchk_da_set_corrupt(ds, level);
if (leafhdr.firstused < hdrsize)
xchk_da_set_corrupt(ds, level);
- if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
+ if (!xchk_xattr_set_map(ds->sc, ab->usedmap, 0, hdrsize))
xchk_da_set_corrupt(ds, level);
+ if (leafhdr.holes)
+ xchk_da_set_preen(ds, level);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
@@ -305,7 +453,7 @@ xchk_xattr_block(
for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
/* Mark the leaf entry itself. */
off = (char *)ent - (char *)leaf;
- if (!xchk_xattr_set_map(ds->sc, usedmap, off,
+ if (!xchk_xattr_set_map(ds->sc, ab->usedmap, off,
sizeof(xfs_attr_leaf_entry_t))) {
xchk_da_set_corrupt(ds, level);
goto out;
@@ -313,13 +461,13 @@ xchk_xattr_block(
/* Check the entry and nameval. */
xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
- usedmap, ent, i, &usedbytes, &last_hashval);
+ ent, i, &usedbytes, &last_hashval);
if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
goto out;
}
- if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
+ if (!xchk_xattr_check_freemap(ds->sc, &leafhdr))
xchk_da_set_corrupt(ds, level);
if (leafhdr.usedbytes != usedbytes)
@@ -333,23 +481,23 @@ out:
STATIC int
xchk_xattr_rec(
struct xchk_da_btree *ds,
- int level,
- void *rec)
+ int level)
{
struct xfs_mount *mp = ds->state->mp;
- struct xfs_attr_leaf_entry *ent = rec;
- struct xfs_da_state_blk *blk;
+ struct xfs_da_state_blk *blk = &ds->state->path.blk[level];
struct xfs_attr_leaf_name_local *lentry;
struct xfs_attr_leaf_name_remote *rentry;
struct xfs_buf *bp;
+ struct xfs_attr_leaf_entry *ent;
xfs_dahash_t calc_hash;
xfs_dahash_t hash;
int nameidx;
int hdrsize;
- unsigned int badflags;
int error;
- blk = &ds->state->path.blk[level];
+ ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);
+
+ ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;
/* Check the whole block, if necessary. */
error = xchk_xattr_block(ds, level);
@@ -374,10 +522,15 @@ xchk_xattr_rec(
/* Retrieve the entry and check it. */
hash = be32_to_cpu(ent->hashval);
- badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
- XFS_ATTR_INCOMPLETE);
- if ((ent->flags & badflags) != 0)
+ if (ent->flags & ~XFS_ATTR_ONDISK_MASK) {
xchk_da_set_corrupt(ds, level);
+ return 0;
+ }
+ if (!xfs_attr_check_namespace(ent->flags)) {
+ xchk_da_set_corrupt(ds, level);
+ return 0;
+ }
+
if (ent->flags & XFS_ATTR_LOCAL) {
lentry = (struct xfs_attr_leaf_name_local *)
(((char *)bp->b_addr) + nameidx);
@@ -385,7 +538,10 @@ xchk_xattr_rec(
xchk_da_set_corrupt(ds, level);
goto out;
}
- calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
+ calc_hash = xfs_attr_hashval(mp, ent->flags, lentry->nameval,
+ lentry->namelen,
+ lentry->nameval + lentry->namelen,
+ be16_to_cpu(lentry->valuelen));
} else {
rentry = (struct xfs_attr_leaf_name_remote *)
(((char *)bp->b_addr) + nameidx);
@@ -393,7 +549,13 @@ xchk_xattr_rec(
xchk_da_set_corrupt(ds, level);
goto out;
}
- calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
+ if (ent->flags & XFS_ATTR_PARENT) {
+ xchk_da_set_corrupt(ds, level);
+ goto out;
+ }
+ calc_hash = xfs_attr_hashval(mp, ent->flags, rentry->name,
+ rentry->namelen, NULL,
+ be32_to_cpu(rentry->valuelen));
}
if (calc_hash != hash)
xchk_da_set_corrupt(ds, level);
@@ -402,46 +564,109 @@ out:
return error;
}
+/* Check space usage of shortform attrs. */
+STATIC int
+xchk_xattr_check_sf(
+ struct xfs_scrub *sc)
+{
+ struct xchk_xattr_buf *ab = sc->buf;
+ struct xfs_ifork *ifp = &sc->ip->i_af;
+ struct xfs_attr_sf_hdr *sf = ifp->if_data;
+ struct xfs_attr_sf_entry *sfe = xfs_attr_sf_firstentry(sf);
+ struct xfs_attr_sf_entry *next;
+ unsigned char *end = ifp->if_data + ifp->if_bytes;
+ int i;
+ int error = 0;
+
+ bitmap_zero(ab->usedmap, ifp->if_bytes);
+ xchk_xattr_set_map(sc, ab->usedmap, 0, sizeof(*sf));
+
+ if ((unsigned char *)sfe > end) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ return 0;
+ }
+
+ for (i = 0; i < sf->count; i++) {
+ unsigned char *name = sfe->nameval;
+ unsigned char *value = &sfe->nameval[sfe->namelen];
+
+ if (xchk_should_terminate(sc, &error))
+ return error;
+
+ next = xfs_attr_sf_nextentry(sfe);
+ if ((unsigned char *)next > end) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ break;
+ }
+
+ /*
+ * Shortform entries do not set LOCAL or INCOMPLETE, so the
+ * only valid flag bits here are for namespaces.
+ */
+ if (sfe->flags & ~XFS_ATTR_NSP_ONDISK_MASK) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ break;
+ }
+
+ if (!xchk_xattr_set_map(sc, ab->usedmap,
+ (char *)sfe - (char *)sf,
+ sizeof(struct xfs_attr_sf_entry))) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ break;
+ }
+
+ if (!xchk_xattr_set_map(sc, ab->usedmap,
+ (char *)name - (char *)sf,
+ sfe->namelen)) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ break;
+ }
+
+ if (!xchk_xattr_set_map(sc, ab->usedmap,
+ (char *)value - (char *)sf,
+ sfe->valuelen)) {
+ xchk_fblock_set_corrupt(sc, XFS_ATTR_FORK, 0);
+ break;
+ }
+
+ sfe = next;
+ }
+
+ return 0;
+}
+
/* Scrub the extended attribute metadata. */
int
xchk_xattr(
struct xfs_scrub *sc)
{
- struct xchk_xattr sx;
- struct attrlist_cursor_kern cursor = { 0 };
xfs_dablk_t last_checked = -1U;
int error = 0;
if (!xfs_inode_hasattr(sc->ip))
return -ENOENT;
- memset(&sx, 0, sizeof(sx));
- /* Check attribute tree structure */
- error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
- &last_checked);
+ /* Allocate memory for xattr checking. */
+ error = xchk_setup_xattr_buf(sc, 0);
+ if (error == -ENOMEM)
+ return -EDEADLOCK;
if (error)
- goto out;
+ return error;
+
+ /* Check the physical structure of the xattr. */
+ if (sc->ip->i_af.if_format == XFS_DINODE_FMT_LOCAL)
+ error = xchk_xattr_check_sf(sc);
+ else
+ error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
+ &last_checked);
+ if (error)
+ return error;
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- goto out;
-
- /* Check that every attr key can also be looked up by hash. */
- sx.context.dp = sc->ip;
- sx.context.cursor = &cursor;
- sx.context.resynch = 1;
- sx.context.put_listent = xchk_xattr_listent;
- sx.context.tp = sc->tp;
- sx.context.flags = ATTR_INCOMPLETE;
- sx.sc = sc;
+ return 0;
/*
- * Look up every xattr in this file by name.
- *
- * Use the backend implementation of xfs_attr_list to call
- * xchk_xattr_listent on every attribute key in this inode.
- * In other words, we use the same iterator/callback mechanism
- * that listattr uses to scrub extended attributes, though in our
- * _listent function, we check the value of the attribute.
+ * Look up every xattr in this file by name and hash.
*
* The VFS only locks i_rwsem when modifying attrs, so keep all
* three locks held because that's the only way to ensure we're
@@ -450,9 +675,9 @@ xchk_xattr(
* iteration, which doesn't really follow the usual buffer
* locking order.
*/
- error = xfs_attr_list_int_ilocked(&sx.context);
+ error = xchk_xattr_walk(sc, sc->ip, xchk_xattr_actor, NULL, NULL);
if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
- goto out;
-out:
- return error;
+ return error;
+
+ return 0;
}