Diffstat (limited to 'fs/xfs/xfs_super.c')
-rw-r--r--	fs/xfs/xfs_super.c | 389
1 file changed, 256 insertions, 133 deletions
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 98401de832ee..0055066fb1d9 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -43,7 +43,11 @@
 #include "xfs_iunlink_item.h"
 #include "xfs_dahash_test.h"
 #include "xfs_rtbitmap.h"
+#include "xfs_exchmaps_item.h"
+#include "xfs_parent.h"
+#include "xfs_rtalloc.h"
 #include "scrub/stats.h"
+#include "scrub/rcbag_btree.h"
 
 #include <linux/magic.h>
 #include <linux/fs_context.h>
@@ -63,6 +67,9 @@ enum xfs_dax_mode {
 	XFS_DAX_NEVER = 2,
 };
 
+/* Were quota mount options provided?  Must use the upper 16 bits of qflags. */
+#define XFS_QFLAGS_MNTOPTS	(1U << 31)
+
 static void
 xfs_mount_set_dax_mode(
 	struct xfs_mount	*mp,
@@ -235,7 +242,7 @@ xfs_set_inode_alloc_perag(
 	xfs_ino_t		ino,
 	xfs_agnumber_t		max_metadata)
 {
-	if (!xfs_is_inode32(pag->pag_mount)) {
+	if (!xfs_is_inode32(pag_mount(pag))) {
 		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 		return false;
@@ -248,7 +255,7 @@ xfs_set_inode_alloc_perag(
 	}
 
 	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
-	if (pag->pag_agno < max_metadata)
+	if (pag_agno(pag) < max_metadata)
 		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
 	else
 		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
@@ -308,9 +315,9 @@ xfs_set_inode_alloc(
 	 * the allocator to accommodate the request.
 	 */
 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
-		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
+		xfs_set_inode32(mp);
 	else
-		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
+		xfs_clear_inode32(mp);
 
 	for (index = 0; index < agcount; index++) {
 		struct xfs_perag	*pag;
@@ -361,16 +368,16 @@ STATIC int
 xfs_blkdev_get(
 	xfs_mount_t		*mp,
 	const char		*name,
-	struct bdev_handle	**handlep)
+	struct file		**bdev_filep)
 {
 	int			error = 0;
 
-	*handlep = bdev_open_by_path(name,
+	*bdev_filep = bdev_file_open_by_path(name,
 		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
 		mp->m_super, &fs_holder_ops);
-	if (IS_ERR(*handlep)) {
-		error = PTR_ERR(*handlep);
-		*handlep = NULL;
+	if (IS_ERR(*bdev_filep)) {
+		error = PTR_ERR(*bdev_filep);
+		*bdev_filep = NULL;
 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
 	}
 
@@ -435,26 +442,26 @@ xfs_open_devices(
 {
 	struct super_block	*sb = mp->m_super;
 	struct block_device	*ddev = sb->s_bdev;
-	struct bdev_handle	*logdev_handle = NULL, *rtdev_handle = NULL;
+	struct file		*logdev_file = NULL, *rtdev_file = NULL;
 	int			error;
 
 	/*
 	 * Open real time and log devices - order is important.
 	 */
 	if (mp->m_logname) {
-		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
+		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
 		if (error)
 			return error;
 	}
 
 	if (mp->m_rtname) {
-		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
+		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
 		if (error)
 			goto out_close_logdev;
 
-		if (rtdev_handle->bdev == ddev ||
-		    (logdev_handle &&
-		     rtdev_handle->bdev == logdev_handle->bdev)) {
+		if (file_bdev(rtdev_file) == ddev ||
+		    (logdev_file &&
+		     file_bdev(rtdev_file) == file_bdev(logdev_file))) {
 			xfs_warn(mp,
 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
 			error = -EINVAL;
@@ -466,25 +473,25 @@ xfs_open_devices(
 	 * Setup xfs_mount buffer target pointers
 	 */
 	error = -ENOMEM;
-	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
+	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
 	if (!mp->m_ddev_targp)
 		goto out_close_rtdev;
 
-	if (rtdev_handle) {
-		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
+	if (rtdev_file) {
+		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
 		if (!mp->m_rtdev_targp)
 			goto out_free_ddev_targ;
 	}
 
-	if (logdev_handle && logdev_handle->bdev != ddev) {
-		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
+	if (logdev_file && file_bdev(logdev_file) != ddev) {
+		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
 		if (!mp->m_logdev_targp)
 			goto out_free_rtdev_targ;
 	} else {
 		mp->m_logdev_targp = mp->m_ddev_targp;
 		/* Handle won't be used, drop it */
-		if (logdev_handle)
-			bdev_release(logdev_handle);
+		if (logdev_file)
+			bdev_fput(logdev_file);
 	}
 
 	return 0;
@@ -495,11 +502,11 @@ xfs_open_devices(
  out_free_ddev_targ:
 	xfs_free_buftarg(mp->m_ddev_targp);
  out_close_rtdev:
-	if (rtdev_handle)
-		bdev_release(rtdev_handle);
+	if (rtdev_file)
+		bdev_fput(rtdev_file);
  out_close_logdev:
-	if (logdev_handle)
-		bdev_release(logdev_handle);
+	if (logdev_file)
+		bdev_fput(logdev_file);
 
 	return error;
 }
@@ -715,9 +722,7 @@ xfs_fs_inode_init_once(
 	/* xfs inode */
 	atomic_set(&ip->i_pincount, 0);
 	spin_lock_init(&ip->i_flags_lock);
-
-	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
-		     "xfsino", ip->i_ino);
+	init_rwsem(&ip->i_lock);
 }
 
 /*
@@ -760,7 +765,7 @@ xfs_mount_free(
 	debugfs_remove(mp->m_debugfs);
 	kfree(mp->m_rtname);
 	kfree(mp->m_logname);
-	kmem_free(mp);
+	kfree(mp);
 }
 
 STATIC int
@@ -814,20 +819,76 @@ xfs_fs_sync_fs(
 	return 0;
 }
 
+static xfs_extlen_t
+xfs_internal_log_size(
+	struct xfs_mount	*mp)
+{
+	if (!mp->m_sb.sb_logstart)
+		return 0;
+	return mp->m_sb.sb_logblocks;
+}
+
+static void
+xfs_statfs_data(
+	struct xfs_mount	*mp,
+	struct kstatfs		*st)
+{
+	int64_t	fdblocks =
+		percpu_counter_sum(&mp->m_fdblocks);
+
+	/* make sure st->f_bfree does not underflow */
+	st->f_bfree = max(0LL, fdblocks - xfs_fdblocks_unavailable(mp));
+	/*
+	 * sb_dblocks can change during growfs, but nothing cares about reporting
+	 * the old or new value during growfs.
+	 */
+	st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp);
+}
+
+/*
+ * When stat(v)fs is called on a file with the realtime bit set or a directory
+ * with the rtinherit bit, report freespace information for the RT device
+ * instead of the main data device.
+ */
+static void
+xfs_statfs_rt(
+	struct xfs_mount	*mp,
+	struct kstatfs		*st)
+{
+	st->f_bfree = xfs_rtbxlen_to_blen(mp,
+			percpu_counter_sum_positive(&mp->m_frextents));
+	st->f_blocks = mp->m_sb.sb_rblocks;
+}
+
+static void
+xfs_statfs_inodes(
+	struct xfs_mount	*mp,
+	struct kstatfs		*st)
+{
+	uint64_t		icount = percpu_counter_sum(&mp->m_icount);
+	uint64_t		ifree = percpu_counter_sum(&mp->m_ifree);
+	uint64_t		fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree);
+
+	st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
+	if (M_IGEO(mp)->maxicount)
+		st->f_files = min_t(typeof(st->f_files), st->f_files,
+					M_IGEO(mp)->maxicount);
+
+	/* If sb_icount overshot maxicount, report actual allocation */
+	st->f_files = max_t(typeof(st->f_files), st->f_files,
+			mp->m_sb.sb_icount);
+
+	/* Make sure st->f_ffree does not underflow */
+	st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree));
+}
+
 STATIC int
 xfs_fs_statfs(
 	struct dentry		*dentry,
-	struct kstatfs		*statp)
+	struct kstatfs		*st)
 {
 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
-	xfs_sb_t		*sbp = &mp->m_sb;
 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
-	uint64_t		fakeinos, id;
-	uint64_t		icount;
-	uint64_t		ifree;
-	uint64_t		fdblocks;
-	xfs_extlen_t		lsize;
-	int64_t			ffree;
 
 	/*
 	 * Expedite background inodegc but don't wait. We do not want to block
@@ -835,58 +896,28 @@ xfs_fs_statfs(
 	 */
 	xfs_inodegc_push(mp);
 
-	statp->f_type = XFS_SUPER_MAGIC;
-	statp->f_namelen = MAXNAMELEN - 1;
-
-	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
-	statp->f_fsid = u64_to_fsid(id);
-
-	icount = percpu_counter_sum(&mp->m_icount);
-	ifree = percpu_counter_sum(&mp->m_ifree);
-	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
-
-	spin_lock(&mp->m_sb_lock);
-	statp->f_bsize = sbp->sb_blocksize;
-	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
-	statp->f_blocks = sbp->sb_dblocks - lsize;
-	spin_unlock(&mp->m_sb_lock);
+	st->f_type = XFS_SUPER_MAGIC;
+	st->f_namelen = MAXNAMELEN - 1;
+	st->f_bsize = mp->m_sb.sb_blocksize;
+	st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev));
 
-	/* make sure statp->f_bfree does not underflow */
-	statp->f_bfree = max_t(int64_t, 0,
-				fdblocks - xfs_fdblocks_unavailable(mp));
-	statp->f_bavail = statp->f_bfree;
-
-	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
-	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
-	if (M_IGEO(mp)->maxicount)
-		statp->f_files = min_t(typeof(statp->f_files),
-					statp->f_files,
-					M_IGEO(mp)->maxicount);
-
-	/* If sb_icount overshot maxicount, report actual allocation */
-	statp->f_files = max_t(typeof(statp->f_files),
-					statp->f_files,
-					sbp->sb_icount);
-
-	/* make sure statp->f_ffree does not underflow */
-	ffree = statp->f_files - (icount - ifree);
-	statp->f_ffree = max_t(int64_t, ffree, 0);
+	xfs_statfs_data(mp, st);
+	xfs_statfs_inodes(mp, st);
+	if (XFS_IS_REALTIME_MOUNT(mp) &&
+	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME)))
+		xfs_statfs_rt(mp, st);
 
 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
-		xfs_qm_statvfs(ip, statp);
-
-	if (XFS_IS_REALTIME_MOUNT(mp) &&
-	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
-		s64	freertx;
-
-		statp->f_blocks = sbp->sb_rblocks;
-		freertx = percpu_counter_sum_positive(&mp->m_frextents);
-		statp->f_bavail = statp->f_bfree =
-			xfs_rtx_to_rtb(mp, freertx);
-	}
+		xfs_qm_statvfs(ip, st);
 
+	/*
+	 * XFS does not distinguish between blocks available to privileged and
+	 * unprivileged users.
+	 */
+	st->f_bavail = st->f_bfree;
 	return 0;
 }
@@ -1052,12 +1083,18 @@ xfs_init_percpu_counters(
 	if (error)
 		goto free_fdblocks;
 
-	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
+	error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
 	if (error)
 		goto free_delalloc;
 
+	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
+	if (error)
+		goto free_delalloc_rt;
+
 	return 0;
 
+free_delalloc_rt:
+	percpu_counter_destroy(&mp->m_delalloc_rtextents);
 free_delalloc:
 	percpu_counter_destroy(&mp->m_delalloc_blks);
 free_fdblocks:
@@ -1087,6 +1124,9 @@ xfs_destroy_percpu_counters(
 	percpu_counter_destroy(&mp->m_ifree);
 	percpu_counter_destroy(&mp->m_fdblocks);
 	ASSERT(xfs_is_shutdown(mp) ||
+		percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
+	percpu_counter_destroy(&mp->m_delalloc_rtextents);
+	ASSERT(xfs_is_shutdown(mp) ||
 		percpu_counter_sum(&mp->m_delalloc_blks) == 0);
 	percpu_counter_destroy(&mp->m_delalloc_blks);
 	percpu_counter_destroy(&mp->m_frextents);
@@ -1134,6 +1174,7 @@ xfs_fs_put_super(
 	xfs_filestream_unmount(mp);
 	xfs_unmountfs(mp);
 
+	xfs_rtmount_freesb(mp);
 	xfs_freesb(mp);
 	xchk_mount_stats_free(mp);
 	free_percpu(mp->m_stats.xs_stats);
@@ -1251,6 +1292,8 @@ xfs_fs_parse_param(
 	int			size = 0;
 	int			opt;
 
+	BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL);
+
 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
 	if (opt < 0)
 		return opt;
@@ -1328,32 +1371,39 @@ xfs_fs_parse_param(
 	case Opt_noquota:
 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_quota:
 	case Opt_uquota:
 	case Opt_usrquota:
 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_qnoenforce:
 	case Opt_uqnoenforce:
 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_pquota:
 	case Opt_prjquota:
 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_pqnoenforce:
 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_gquota:
 	case Opt_grpquota:
 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_gqnoenforce:
 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
+		parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS;
 		return 0;
 	case Opt_discard:
 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
@@ -1420,7 +1470,8 @@ xfs_fs_validate_params(
 		return -EINVAL;
 	}
 
-	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
+	if (!IS_ENABLED(CONFIG_XFS_QUOTA) &&
+	    (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) {
 		xfs_warn(mp, "quota support not available in this kernel.");
 		return -EINVAL;
 	}
@@ -1501,7 +1552,7 @@ xfs_fs_fill_super(
 	 * the newer fsopen/fsconfig API.
 	 */
 	if (fc->sb_flags & SB_RDONLY)
-		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+		xfs_set_readonly(mp);
 	if (fc->sb_flags & SB_DIRSYNC)
 		mp->m_features |= XFS_FEAT_DIRSYNC;
 	if (fc->sb_flags & SB_SYNCHRONOUS)
@@ -1580,17 +1631,21 @@ xfs_fs_fill_super(
 	if (error)
 		goto out_free_sb;
 
-	/* V4 support is undergoing deprecation. */
-	if (!xfs_has_crc(mp)) {
-#ifdef CONFIG_XFS_SUPPORT_V4
+	/*
+	 * V4 support is undergoing deprecation.
+	 *
+	 * Note: this has to use an open coded m_features check as xfs_has_crc
+	 * always returns false for !CONFIG_XFS_SUPPORT_V4.
+	 */
+	if (!(mp->m_features & XFS_FEAT_CRC)) {
+		if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
+			xfs_warn(mp,
+	"Deprecated V4 format (crc=0) not supported by kernel.");
+			error = -EINVAL;
+			goto out_free_sb;
+		}
 		xfs_warn_once(mp,
 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
-#else
-		xfs_warn(mp,
-	"Deprecated V4 format (crc=0) not supported by kernel.");
-		error = -EINVAL;
-		goto out_free_sb;
-#endif
 	}
 
 	/* ASCII case insensitivity is undergoing deprecation. */
@@ -1606,8 +1661,12 @@ xfs_fs_fill_super(
 #endif
 	}
 
-	/* Filesystem claims it needs repair, so refuse the mount. */
-	if (xfs_has_needsrepair(mp)) {
+	/*
+	 * Filesystem claims it needs repair, so refuse the mount unless
+	 * norecovery is also specified, in which case the filesystem can
+	 * be mounted with no risk of further damage.
+	 */
+	if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
 		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
 		error = -EFSCORRUPTED;
 		goto out_free_sb;
@@ -1624,16 +1683,26 @@ xfs_fs_fill_super(
 		goto out_free_sb;
 	}
 
-	/*
-	 * Until this is fixed only page-sized or smaller data blocks work.
-	 */
 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
-		xfs_warn(mp,
-		"File system with blocksize %d bytes. "
-		"Only pagesize (%ld) or less will currently work.",
+		size_t max_folio_size = mapping_max_folio_size_supported();
+
+		if (!xfs_has_crc(mp)) {
+			xfs_warn(mp,
+"V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
 				mp->m_sb.sb_blocksize, PAGE_SIZE);
-		error = -ENOSYS;
-		goto out_free_sb;
+			error = -ENOSYS;
+			goto out_free_sb;
+		}
+
+		if (mp->m_sb.sb_blocksize > max_folio_size) {
+			xfs_warn(mp,
+"block size (%u bytes) not supported; Only block size (%zu) or less is supported",
+				mp->m_sb.sb_blocksize, max_folio_size);
+			error = -ENOSYS;
+			goto out_free_sb;
+		}
+
+		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS);
 	}
 
 	/* Ensure this filesystem fits in the page cache limits */
@@ -1665,10 +1734,14 @@ xfs_fs_fill_super(
 		goto out_free_sb;
 	}
 
-	error = xfs_filestream_mount(mp);
+	error = xfs_rtmount_readsb(mp);
 	if (error)
 		goto out_free_sb;
 
+	error = xfs_filestream_mount(mp);
+	if (error)
+		goto out_free_rtsb;
+
 	/*
 	 * we must configure the block size in the superblock before we run the
 	 * full mount process as the mount process can lookup and cache inodes.
@@ -1687,7 +1760,7 @@ xfs_fs_fill_super(
 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
 	}
 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
-	sb->s_iflags |= SB_I_CGROUPWB;
+	sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM;
 
 	set_posix_acl_flag(sb);
 
@@ -1707,10 +1780,15 @@ xfs_fs_fill_super(
 		mp->m_features &= ~XFS_FEAT_DISCARD;
 	}
 
+	if (xfs_has_metadir(mp))
+		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR);
+
 	if (xfs_has_reflink(mp)) {
-		if (mp->m_sb.sb_rblocks) {
+		if (xfs_has_realtime(mp) &&
+		    !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) {
 			xfs_alert(mp,
-	"reflink not compatible with realtime device!");
+	"reflink not compatible with realtime extent size %u!",
+				mp->m_sb.sb_rextsize);
 			error = -EINVAL;
 			goto out_filestream_unmount;
 		}
@@ -1721,12 +1799,20 @@ xfs_fs_fill_super(
 		}
 	}
 
-	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
-		xfs_alert(mp,
-	"reverse mapping btree not compatible with realtime device!");
-		error = -EINVAL;
-		goto out_filestream_unmount;
-	}
+
+	if (xfs_has_exchange_range(mp))
+		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE);
+
+	if (xfs_has_parent(mp))
+		xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PPTR);
+
+	/*
+	 * If no quota mount options were provided, maybe we'll try to pick
+	 * up the quota accounting and enforcement flags from the ondisk sb.
+	 */
+	if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS))
+		xfs_set_resuming_quotaon(mp);
+	mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
 
 	error = xfs_mountfs(mp);
 	if (error)
@@ -1747,6 +1833,8 @@ xfs_fs_fill_super(
 
  out_filestream_unmount:
 	xfs_filestream_unmount(mp);
+ out_free_rtsb:
+	xfs_rtmount_freesb(mp);
  out_free_sb:
 	xfs_freesb(mp);
  out_free_scrub_stats:
@@ -1766,7 +1854,7 @@ xfs_fs_fill_super(
  out_unmount:
 	xfs_filestream_unmount(mp);
 	xfs_unmountfs(mp);
-	goto out_free_sb;
+	goto out_free_rtsb;
 }
 
 static int
@@ -1798,7 +1886,7 @@ xfs_remount_rw(
 		return -EINVAL;
 	}
 
-	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+	xfs_clear_readonly(mp);
 
 	/*
 	 * If this is the first remount to writeable state we might have some
@@ -1874,11 +1962,7 @@ xfs_remount_ro(
 	xfs_inodegc_stop(mp);
 
 	/* Free the per-AG metadata reservation pool. */
-	error = xfs_fs_unreserve_ag_blocks(mp);
-	if (error) {
-		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		return error;
-	}
+	xfs_fs_unreserve_ag_blocks(mp);
 
 	/*
 	 * Before we sync the metadata, we need to free up the reserve block
@@ -1890,7 +1974,7 @@ xfs_remount_ro(
 	xfs_save_resvblks(mp);
 	xfs_log_clean(mp);
 
-	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+	xfs_set_readonly(mp);
 
 	return 0;
 }
@@ -1916,6 +2000,8 @@ xfs_fs_reconfigure(
 	int			flags = fc->sb_flags;
 	int			error;
 
+	new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS;
+
 	/* version 5 superblocks always support version counters. */
 	if (xfs_has_crc(mp))
 		fc->sb_flags |= SB_I_VERSION;
@@ -1981,18 +2067,20 @@ static const struct fs_context_operations xfs_context_ops = {
  * mount option parsing having already been performed as this can be called from
  * fsopen() before any parameters have been set.
  */
-static int xfs_init_fs_context(
+static int
+xfs_init_fs_context(
 	struct fs_context	*fc)
 {
 	struct xfs_mount	*mp;
+	int			i;
 
-	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
+	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
 	if (!mp)
 		return -ENOMEM;
 
 	spin_lock_init(&mp->m_sb_lock);
-	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
-	spin_lock_init(&mp->m_perag_lock);
+	for (i = 0; i < XG_TYPE_MAX; i++)
+		xa_init(&mp->m_groups[i].xa);
 	mutex_init(&mp->m_growlock);
 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
@@ -2012,6 +2100,8 @@ static int xfs_init_fs_context(
 	mp->m_logbsize = -1;
 	mp->m_allocsize_log = 16; /* 64k */
 
+	xfs_hooks_init(&mp->m_dir_update_hooks);
+
 	fc->s_fs_info = mp;
 	fc->ops = &xfs_context_ops;
 
@@ -2032,7 +2122,7 @@ static struct file_system_type xfs_fs_type = {
 	.init_fs_context	= xfs_init_fs_context,
 	.parameters		= xfs_fs_parameters,
 	.kill_sb		= xfs_kill_sb,
-	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME,
 };
 MODULE_ALIAS_FS("xfs");
 
@@ -2043,8 +2133,7 @@ xfs_init_caches(void)
 	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
 					 SLAB_HWCACHE_ALIGN |
-					 SLAB_RECLAIM_ACCOUNT |
-					 SLAB_MEM_SPREAD,
+					 SLAB_RECLAIM_ACCOUNT,
 					 NULL);
 	if (!xfs_buf_cache)
 		goto out;
@@ -2059,10 +2148,14 @@ xfs_init_caches(void)
 	if (error)
 		goto out_destroy_log_ticket_cache;
 
-	error = xfs_defer_init_item_caches();
+	error = rcbagbt_init_cur_cache();
 	if (error)
 		goto out_destroy_btree_cur_cache;
 
+	error = xfs_defer_init_item_caches();
+	if (error)
+		goto out_destroy_rcbagbt_cur_cache;
+
 	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
 					     sizeof(struct xfs_da_state),
 					     0, 0, NULL);
@@ -2109,14 +2202,14 @@ xfs_init_caches(void)
 			sizeof(struct xfs_inode), 0,
 			(SLAB_HWCACHE_ALIGN |
 			 SLAB_RECLAIM_ACCOUNT |
-			 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
+			 SLAB_ACCOUNT),
 			xfs_fs_inode_init_once);
 	if (!xfs_inode_cache)
 		goto out_destroy_efi_cache;
 
 	xfs_ili_cache = kmem_cache_create("xfs_ili",
 			sizeof(struct xfs_inode_log_item), 0,
-			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+			SLAB_RECLAIM_ACCOUNT,
 			NULL);
 	if (!xfs_ili_cache)
 		goto out_destroy_inode_cache;
@@ -2181,8 +2274,32 @@ xfs_init_caches(void)
 	if (!xfs_iunlink_cache)
 		goto out_destroy_attri_cache;
 
+	xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
+					 sizeof(struct xfs_xmd_log_item),
+					 0, 0, NULL);
+	if (!xfs_xmd_cache)
+		goto out_destroy_iul_cache;
+
+	xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
+					 sizeof(struct xfs_xmi_log_item),
+					 0, 0, NULL);
+	if (!xfs_xmi_cache)
+		goto out_destroy_xmd_cache;
+
+	xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
+					     sizeof(struct xfs_parent_args),
+					     0, 0, NULL);
+	if (!xfs_parent_args_cache)
+		goto out_destroy_xmi_cache;
+
 	return 0;
 
+ out_destroy_xmi_cache:
+	kmem_cache_destroy(xfs_xmi_cache);
+ out_destroy_xmd_cache:
+	kmem_cache_destroy(xfs_xmd_cache);
+ out_destroy_iul_cache:
+	kmem_cache_destroy(xfs_iunlink_cache);
  out_destroy_attri_cache:
 	kmem_cache_destroy(xfs_attri_cache);
  out_destroy_attrd_cache:
@@ -2219,6 +2336,8 @@ xfs_init_caches(void)
 	kmem_cache_destroy(xfs_da_state_cache);
  out_destroy_defer_item_cache:
 	xfs_defer_destroy_item_caches();
+ out_destroy_rcbagbt_cur_cache:
+	rcbagbt_destroy_cur_cache();
  out_destroy_btree_cur_cache:
 	xfs_btree_destroy_cur_caches();
  out_destroy_log_ticket_cache:
@@ -2237,6 +2356,9 @@ xfs_destroy_caches(void)
 	 * destroy caches.
 	 */
 	rcu_barrier();
+	kmem_cache_destroy(xfs_parent_args_cache);
+	kmem_cache_destroy(xfs_xmd_cache);
+	kmem_cache_destroy(xfs_xmi_cache);
 	kmem_cache_destroy(xfs_iunlink_cache);
 	kmem_cache_destroy(xfs_attri_cache);
 	kmem_cache_destroy(xfs_attrd_cache);
@@ -2256,6 +2378,7 @@ xfs_destroy_caches(void)
 	kmem_cache_destroy(xfs_ifork_cache);
 	kmem_cache_destroy(xfs_da_state_cache);
 	xfs_defer_destroy_item_caches();
+	rcbagbt_destroy_cur_cache();
 	xfs_btree_destroy_cur_caches();
 	kmem_cache_destroy(xfs_log_ticket_cache);
 	kmem_cache_destroy(xfs_buf_cache);
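
Reviewer note on the quota option changes above: XFS_QFLAGS_MNTOPTS reserves an otherwise unused high bit of m_qflags as an in-memory "a quota mount option was given" marker, the BUILD_BUG_ON() in xfs_fs_parse_param() keeps it from ever colliding with a real quota flag, and xfs_fs_fill_super()/xfs_fs_reconfigure() strip it again before it can reach disk. A minimal standalone C sketch of that pattern follows; the names, flag values and option strings here are illustrative stand-ins, not the kernel's.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the real accounting/enforcement qflags. */
    #define UQUOTA_ACCT	(1U << 0)
    #define UQUOTA_ENFD	(1U << 1)
    #define QUOTA_ALL	(UQUOTA_ACCT | UQUOTA_ENFD)

    /* In-memory only: "some quota mount option was parsed".  Must not overlap. */
    #define QFLAGS_MNTOPTS	(1U << 31)

    static uint32_t parse_quota_opt(uint32_t qflags, const char *opt)
    {
    	/* The kernel does this overlap check at compile time with BUILD_BUG_ON(). */
    	assert(!(QFLAGS_MNTOPTS & QUOTA_ALL));

    	if (strcmp(opt, "uquota") == 0)
    		qflags |= UQUOTA_ACCT | UQUOTA_ENFD;
    	else if (strcmp(opt, "noquota") == 0)
    		qflags &= ~QUOTA_ALL;
    	/* Every quota option, including "noquota", sets the marker bit. */
    	return qflags | QFLAGS_MNTOPTS;
    }

    int main(void)
    {
    	uint32_t qflags = parse_quota_opt(0, "noquota");

    	/*
    	 * At mount time, fall back to the on-disk quota flags only when no
    	 * quota mount option was seen at all, then drop the marker bit so it
    	 * never reaches the on-disk superblock.
    	 */
    	if (!(qflags & QFLAGS_MNTOPTS))
    		puts("resume quota state from the on-disk superblock");
    	else
    		puts("honour the quota mount options as given");
    	qflags &= ~QFLAGS_MNTOPTS;

    	return 0;
    }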
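Likewise, the new xfs_statfs_data() and xfs_statfs_inodes() helpers both follow the same clamp-before-reporting shape ("make sure st->f_bfree/f_ffree does not underflow"). Below is a rough standalone illustration of that clamp, assuming only that reservations can temporarily exceed the counted free space; it is not the kernel helper itself and the numbers are invented.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * The reserved pool can temporarily exceed the summed free-space counter,
     * so the reported value is floored at zero instead of wrapping around.
     */
    static uint64_t clamped_free(int64_t counted_free, int64_t unavailable)
    {
    	int64_t avail = counted_free - unavailable;

    	return avail > 0 ? (uint64_t)avail : 0;
    }

    int main(void)
    {
    	/* 1000 blocks counted free, 1200 held back by reservations. */
    	printf("f_bfree = %llu\n",
    	       (unsigned long long)clamped_free(1000, 1200));	/* prints 0 */
    	return 0;
    }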