Diffstat (limited to 'fs/xfs/xfs_mount.h')
-rw-r--r--   fs/xfs/xfs_mount.h   352
1 file changed, 293 insertions, 59 deletions
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index e880aa48de68..d85084f9f317 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -72,6 +72,76 @@ struct xfs_inodegc {
};
/*
+ * Container for each type of group, used to look up individual groups and
+ * to describe their geometry.
+ */
+struct xfs_groups {
+ struct xarray xa;
+
+ /*
+ * Maximum capacity of the group in FSBs.
+ *
+ * Each group is laid out densely in the daddr space. For the
+ * degenerate case of a pre-rtgroups filesystem, the incore rtgroup
+ * geometry pretends to describe a zero-block, zero-blklog rtgroup.
+ */
+ uint32_t blocks;
+
+ /*
+ * Log(2) of the logical size of each group.
+ *
+ * Compared to the blocks field above this is rounded up to the next
+ * power of two, and thus lays out the xfs_fsblock_t/xfs_rtblock_t
+ * space sparsely with a hole from blocks to (1 << blklog) at the end
+ * of each group.
+ */
+ uint8_t blklog;
+
+ /*
+ * Zoned devices can have gaps between the usable capacity of a zone and
+ * the end of the zone in the LBA/daddr address space. In other words,
+ * the hardware equivalent to the RT groups already takes care of the
+ * power of 2 alignment for us. In this case the sparse FSB/RTB address
+ * space maps 1:1 to the device address space.
+ */
+ bool has_daddr_gaps;
+
+ /*
+ * Mask to extract the group-relative block number from an FSB.
+ * For a pre-rtgroups filesystem we pretend to have one very large
+ * rtgroup, so this mask must be 64-bit.
+ */
+ uint64_t blkmask;
+
+ /*
+ * Start of the first group in the device. This is used to support an
+ * RT device following the data device on the same block device for
+ * SMR hard drives.
+ */
+ xfs_fsblock_t start_fsb;
+
+ /*
+ * Maximum length of an atomic write for files stored in this
+ * collection of allocation groups, in fsblocks.
+ */
+ xfs_extlen_t awu_max;
+};
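
The blocks/blklog/blkmask trio above implies the usual sparse encoding: the group index lives in the high bits of a block number, the group-relative offset in the low bits, and the range from blocks up to (1 << blklog) at the end of each group is an unused hole. A minimal userspace sketch of that arithmetic, using illustrative struct and helper names rather than the kernel's:

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative stand-in for the geometry fields of struct xfs_groups. */
	struct groups_geo {
		uint32_t	blocks;		/* usable blocks per group */
		uint8_t		blklog;		/* log2 of the power-of-two group size */
		uint64_t	blkmask;	/* (1ULL << blklog) - 1 */
	};

	/* Compose a sparse block number from (group, group-relative block). */
	static inline uint64_t geo_to_fsb(const struct groups_geo *g,
			uint32_t gno, uint32_t gbno)
	{
		assert(gbno < g->blocks);	/* [blocks, 1 << blklog) is the hole */
		return ((uint64_t)gno << g->blklog) | gbno;
	}

	/* Split a sparse block number back into group index and offset. */
	static inline uint32_t geo_to_gno(const struct groups_geo *g, uint64_t fsb)
	{
		return fsb >> g->blklog;
	}

	static inline uint32_t geo_to_gbno(const struct groups_geo *g, uint64_t fsb)
	{
		return fsb & g->blkmask;
	}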
+
+struct xfs_freecounter {
+ /* free blocks for general use: */
+ struct percpu_counter count;
+
+ /* total reserved blocks: */
+ uint64_t res_total;
+
+ /* available reserved blocks: */
+ uint64_t res_avail;
+
+ /* reserved blks @ remount,ro: */
+ uint64_t res_saved;
+};
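
Read together, count is what ordinary allocations draw from, while res_total and res_avail track the separate reserve pool. One derived quantity, sketched with an illustrative helper name and assuming res_total is the pool's target size and res_avail what is still unused in it:

	/* Blocks of the reserve pool that are currently handed out. */
	static inline uint64_t freecounter_resv_used(const struct xfs_freecounter *fc)
	{
		return fc->res_total - fc->res_avail;
	}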
+
+/*
 * The struct xfs_mount layout is optimised to separate read-mostly variables
* from variables that are frequently modified. We put the read-mostly variables
* first, then place all the other variables at the end.
@@ -85,27 +155,20 @@ typedef struct xfs_mount {
struct super_block *m_super;
struct xfs_ail *m_ail; /* fs active log item list */
struct xfs_buf *m_sb_bp; /* buffer for superblock */
+ struct xfs_buf *m_rtsb_bp; /* realtime superblock */
char *m_rtname; /* realtime device name */
char *m_logname; /* external log device name */
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
struct xlog *m_log; /* log specific stuff */
- struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
- struct xfs_inode *m_rsumip; /* pointer to summary inode */
struct xfs_inode *m_rootip; /* pointer to root directory */
+ struct xfs_inode *m_metadirip; /* ptr to metadata directory */
+ struct xfs_inode *m_rtdirip; /* ptr to realtime metadir */
struct xfs_quotainfo *m_quotainfo; /* disk quota information */
struct xfs_buftarg *m_ddev_targp; /* data device */
struct xfs_buftarg *m_logdev_targp;/* log device */
struct xfs_buftarg *m_rtdev_targp; /* rt device */
void __percpu *m_inodegc; /* percpu inodegc structures */
-
- /*
- * Optional cache of rt summary level per bitmap block with the
- * invariant that m_rsum_cache[bbno] > the maximum i for which
- * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
- * Reads and writes are serialized by the rsumip inode lock.
- */
- uint8_t *m_rsum_cache;
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct workqueue_struct *m_buf_workqueue;
struct workqueue_struct *m_unwritten_workqueue;
@@ -120,22 +183,31 @@ typedef struct xfs_mount {
uint8_t m_agno_log; /* log #ag's */
uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
int8_t m_rtxblklog; /* log2 of rextsize, if possible */
+
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
- uint m_blockwmask; /* blockwsize-1 */
+ /* number of rt extents per rt bitmap block if rtgroups enabled */
+ unsigned int m_rtx_per_rbmblock;
uint m_alloc_mxr[2]; /* max alloc btree records */
uint m_alloc_mnr[2]; /* min alloc btree records */
uint m_bmap_dmxr[2]; /* max bmap btree records */
uint m_bmap_dmnr[2]; /* min bmap btree records */
uint m_rmap_mxr[2]; /* max rmap btree records */
uint m_rmap_mnr[2]; /* min rmap btree records */
+ uint m_rtrmap_mxr[2]; /* max rtrmap btree records */
+ uint m_rtrmap_mnr[2]; /* min rtrmap btree records */
uint m_refc_mxr[2]; /* max refc btree records */
uint m_refc_mnr[2]; /* min refc btree records */
+ uint m_rtrefc_mxr[2]; /* max rtrefc btree records */
+ uint m_rtrefc_mnr[2]; /* min rtrefc btree records */
uint m_alloc_maxlevels; /* max alloc btree levels */
uint m_bm_maxlevels[2]; /* max bmap btree levels */
uint m_rmap_maxlevels; /* max rmap btree levels */
+ uint m_rtrmap_maxlevels; /* max rtrmap btree level */
uint m_refc_maxlevels; /* max refcount btree level */
+ uint m_rtrefc_maxlevels; /* max rtrefc btree level */
unsigned int m_agbtree_maxlevels; /* max level of all AG btrees */
+ unsigned int m_rtbtree_maxlevels; /* max level of all rt btrees */
xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
uint m_alloc_set_aside; /* space we can't use */
uint m_ag_max_usable; /* max space per AG */
@@ -146,8 +218,8 @@ typedef struct xfs_mount {
uint m_allocsize_blocks; /* min write size blocks */
int m_logbufs; /* number of log buffers */
int m_logbsize; /* size of each log buffer */
- uint m_rsumlevels; /* rt summary levels */
- uint m_rsumsize; /* size of rt summary, bytes */
+ unsigned int m_rsumlevels; /* rt summary levels */
+ xfs_filblks_t m_rsumblocks; /* size of rt summary, FSBs */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_qflags; /* quota status flags */
uint64_t m_features; /* active filesystem features */
@@ -162,6 +234,12 @@ typedef struct xfs_mount {
bool m_fail_unmount;
bool m_finobt_nores; /* no per-AG finobt resv. */
bool m_update_sb; /* sb needs update in mount */
+ unsigned int m_max_open_zones;
+ unsigned int m_zonegc_low_space;
+ struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */
+
+ /* max_atomic_write mount option value */
+ unsigned long long m_awu_max_bytes;
/*
* Bitsets of per-fs metadata that have been checked and/or are sick.
@@ -186,8 +264,8 @@ typedef struct xfs_mount {
spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
struct percpu_counter m_icount; /* allocated inodes counter */
struct percpu_counter m_ifree; /* free inodes counter */
- struct percpu_counter m_fdblocks; /* free block counter */
- struct percpu_counter m_frextents; /* free rt extent counter */
+
+ struct xfs_freecounter m_free[XC_FREE_NR];
/*
* Count of data device blocks reserved for delayed allocations,
@@ -195,6 +273,12 @@ typedef struct xfs_mount {
* extents or anything related to the rt device.
*/
struct percpu_counter m_delalloc_blks;
+
+ /*
+ * RT version of the above.
+ */
+ struct percpu_counter m_delalloc_rtextents;
+
/*
* Global count of allocation btree blocks in use across all AGs. Only
* used when perag reservation is enabled. Helps prevent block
@@ -202,12 +286,9 @@ typedef struct xfs_mount {
*/
atomic64_t m_allocbt_blks;
- struct radix_tree_root m_perag_tree; /* per-ag accounting info */
- spinlock_t m_perag_lock; /* lock for m_perag_tree */
- uint64_t m_resblks; /* total reserved blocks */
- uint64_t m_resblks_avail;/* available reserved blocks */
- uint64_t m_resblks_save; /* reserved blks @ remount,ro */
+ struct xfs_groups m_groups[XG_TYPE_MAX];
struct delayed_work m_reclaim_work; /* background inode reclaim */
+ struct xfs_zone_info *m_zone_info; /* zone allocator information */
struct dentry *m_debugfs; /* debugfs parent */
struct xfs_kobj m_kobj;
struct xfs_kobj m_error_kobj;
@@ -217,8 +298,15 @@ typedef struct xfs_mount {
#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
struct xchk_stats *m_scrub_stats;
#endif
+ struct xfs_kobj m_zoned_kobj;
xfs_agnumber_t m_agfrotor; /* last ag where space found */
atomic_t m_agirotor; /* last ag dir inode alloced */
+ atomic_t m_rtgrotor; /* last rtgroup rtpicked */
+
+ struct mutex m_metafile_resv_lock;
+ uint64_t m_metafile_resv_target;
+ uint64_t m_metafile_resv_used;
+ uint64_t m_metafile_resv_avail;
/* Memory shrinker to throttle and reprioritize inodegc */
struct shrinker *m_inodegc_shrinker;
@@ -292,8 +380,12 @@ typedef struct xfs_mount {
#define XFS_FEAT_BIGTIME (1ULL << 24) /* large timestamps */
#define XFS_FEAT_NEEDSREPAIR (1ULL << 25) /* needs xfs_repair */
#define XFS_FEAT_NREXT64 (1ULL << 26) /* large extent counters */
+#define XFS_FEAT_EXCHANGE_RANGE (1ULL << 27) /* exchange range */
+#define XFS_FEAT_METADIR (1ULL << 28) /* metadata directory tree */
+#define XFS_FEAT_ZONED (1ULL << 29) /* zoned RT device */
/* Mount features */
+#define XFS_FEAT_NOLIFETIME (1ULL << 47) /* disable lifetime hints */
#define XFS_FEAT_NOATTR2 (1ULL << 48) /* disable attr2 creation */
#define XFS_FEAT_NOALIGN (1ULL << 49) /* ignore alignment */
#define XFS_FEAT_ALLOCSIZE (1ULL << 50) /* user specified allocation size */
@@ -313,7 +405,7 @@ typedef struct xfs_mount {
#define XFS_FEAT_NOUUID (1ULL << 63) /* ignore uuid during mount */
#define __XFS_HAS_FEAT(name, NAME) \
-static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
+static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \
{ \
return mp->m_features & XFS_FEAT_ ## NAME; \
}
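
Each __XFS_HAS_FEAT(name, NAME) invocation below generates a trivial predicate; __XFS_HAS_FEAT(metadir, METADIR), for instance, expands to roughly:

	static inline bool xfs_has_metadir(const struct xfs_mount *mp)
	{
		return mp->m_features & XFS_FEAT_METADIR;
	}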
@@ -331,19 +423,10 @@ static inline void xfs_add_ ## name (struct xfs_mount *mp) \
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
-__XFS_HAS_FEAT(align, ALIGN)
__XFS_HAS_FEAT(dalign, DALIGN)
-__XFS_HAS_FEAT(logv2, LOGV2)
__XFS_HAS_FEAT(sector, SECTOR)
-__XFS_HAS_FEAT(extflg, EXTFLG)
__XFS_HAS_FEAT(asciici, ASCIICI)
-__XFS_HAS_FEAT(lazysbcount, LAZYSBCOUNT)
-__XFS_ADD_FEAT(attr2, ATTR2)
__XFS_HAS_FEAT(parent, PARENT)
-__XFS_ADD_FEAT(projid32, PROJID32)
-__XFS_HAS_FEAT(crc, CRC)
-__XFS_HAS_FEAT(v3inodes, V3INODES)
-__XFS_HAS_FEAT(pquotino, PQUOTINO)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
@@ -355,6 +438,77 @@ __XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
__XFS_HAS_FEAT(large_extent_counts, NREXT64)
+__XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE)
+__XFS_HAS_FEAT(metadir, METADIR)
+__XFS_HAS_FEAT(zoned, ZONED)
+__XFS_HAS_FEAT(nolifetime, NOLIFETIME)
+
+static inline bool xfs_has_rtgroups(const struct xfs_mount *mp)
+{
+ /* all metadir file systems also allow rtgroups */
+ return xfs_has_metadir(mp);
+}
+
+static inline bool xfs_has_rtsb(const struct xfs_mount *mp)
+{
+ /* all rtgroups filesystems with an rt section have an rtsb */
+ return xfs_has_rtgroups(mp) &&
+ xfs_has_realtime(mp) &&
+ !xfs_has_zoned(mp);
+}
+
+static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp)
+{
+ return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) &&
+ xfs_has_rmapbt(mp);
+}
+
+static inline bool xfs_has_rtreflink(const struct xfs_mount *mp)
+{
+ return xfs_has_metadir(mp) && xfs_has_realtime(mp) &&
+ xfs_has_reflink(mp);
+}
+
+static inline bool xfs_has_nonzoned(const struct xfs_mount *mp)
+{
+ return !xfs_has_zoned(mp);
+}
+
+static inline bool xfs_can_sw_atomic_write(struct xfs_mount *mp)
+{
+ return xfs_has_reflink(mp);
+}
+
+/*
+ * Some features are always on for v5 file systems; this allows the compiler
+ * to eliminate dead code when building without v4 support.
+ */
+#define __XFS_HAS_V4_FEAT(name, NAME) \
+static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
+{ \
+ return !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) || \
+ (mp->m_features & XFS_FEAT_ ## NAME); \
+}
+
+#define __XFS_ADD_V4_FEAT(name, NAME) \
+ __XFS_HAS_V4_FEAT(name, NAME); \
+static inline void xfs_add_ ## name (struct xfs_mount *mp) \
+{ \
+ if (IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { \
+ mp->m_features |= XFS_FEAT_ ## NAME; \
+ xfs_sb_version_add ## name(&mp->m_sb); \
+ } \
+}
+
+__XFS_HAS_V4_FEAT(align, ALIGN)
+__XFS_HAS_V4_FEAT(logv2, LOGV2)
+__XFS_HAS_V4_FEAT(extflg, EXTFLG)
+__XFS_HAS_V4_FEAT(lazysbcount, LAZYSBCOUNT)
+__XFS_ADD_V4_FEAT(attr2, ATTR2)
+__XFS_ADD_V4_FEAT(projid32, PROJID32)
+__XFS_HAS_V4_FEAT(v3inodes, V3INODES)
+__XFS_HAS_V4_FEAT(crc, CRC)
+__XFS_HAS_V4_FEAT(pquotino, PQUOTINO)
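
With CONFIG_XFS_SUPPORT_V4 disabled, !IS_ENABLED(CONFIG_XFS_SUPPORT_V4) is compile-time true, so each helper generated above collapses to return true and any V4-only branch guarded by it becomes dead code. A sketch of the effect on a caller, where do_v4_path and do_v5_path are hypothetical placeholders:

	static void example_caller(struct xfs_mount *mp)
	{
		if (xfs_has_crc(mp))		/* constant-folds to true without V4 support */
			do_v5_path(mp);		/* hypothetical v5-only helper */
		else
			do_v4_path(mp);		/* discarded as dead code by the compiler */
	}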
/*
* Mount features
@@ -404,14 +558,26 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
*/
#define XFS_OPSTATE_BLOCKGC_ENABLED 6
-/* Kernel has logged a warning about online fsck being used on this fs. */
-#define XFS_OPSTATE_WARNED_SCRUB 7
/* Kernel has logged a warning about shrink being used on this fs. */
-#define XFS_OPSTATE_WARNED_SHRINK 8
+#define XFS_OPSTATE_WARNED_SHRINK 9
/* Kernel has logged a warning about logged xattr updates being used. */
-#define XFS_OPSTATE_WARNED_LARP 9
+#define XFS_OPSTATE_WARNED_LARP 10
/* Mount time quotacheck is running */
-#define XFS_OPSTATE_QUOTACHECK_RUNNING 10
+#define XFS_OPSTATE_QUOTACHECK_RUNNING 11
+/* Do we want to clear log incompat flags? */
+#define XFS_OPSTATE_UNSET_LOG_INCOMPAT 12
+/* Filesystem can use logged extended attributes */
+#define XFS_OPSTATE_USE_LARP 13
+/* Kernel has logged a warning about blocksize > pagesize on this fs. */
+#define XFS_OPSTATE_WARNED_LBS 14
+/* Kernel has logged a warning about metadata dirs being used on this fs. */
+#define XFS_OPSTATE_WARNED_METADIR 17
+/* Filesystem should use qflags to determine quotaon status */
+#define XFS_OPSTATE_RESUMING_QUOTAON 18
+/* Kernel has logged a warning about zoned RT device being used on this fs. */
+#define XFS_OPSTATE_WARNED_ZONED 19
+/* (Zoned) GC is in progress */
+#define XFS_OPSTATE_ZONEGC_RUNNING 20
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
@@ -436,9 +602,27 @@ __XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
#ifdef CONFIG_XFS_QUOTA
__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
+__XFS_IS_OPSTATE(resuming_quotaon, RESUMING_QUOTAON)
#else
-# define xfs_is_quotacheck_running(mp) (false)
-#endif
+static inline bool xfs_is_quotacheck_running(struct xfs_mount *mp)
+{
+ return false;
+}
+static inline bool xfs_is_resuming_quotaon(struct xfs_mount *mp)
+{
+ return false;
+}
+static inline void xfs_set_resuming_quotaon(struct xfs_mount *m)
+{
+}
+static inline bool xfs_clear_resuming_quotaon(struct xfs_mount *mp)
+{
+ return false;
+}
+#endif /* CONFIG_XFS_QUOTA */
+__XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
+__XFS_IS_OPSTATE(using_logged_xattrs, USE_LARP)
+__XFS_IS_OPSTATE(zonegc_running, ZONEGC_RUNNING)
static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
@@ -454,10 +638,11 @@ xfs_should_warn(struct xfs_mount *mp, long nr)
{ (1UL << XFS_OPSTATE_READONLY), "read_only" }, \
{ (1UL << XFS_OPSTATE_INODEGC_ENABLED), "inodegc" }, \
{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \
- { (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
{ (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
{ (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \
- { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }
+ { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }, \
+ { (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT), "unset_log_incompat" }, \
+ { (1UL << XFS_OPSTATE_USE_LARP), "logged_xattrs" }
/*
* Max and min values for mount-option defined I/O
@@ -506,7 +691,8 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}
extern void xfs_uuid_table_free(void);
-extern uint64_t xfs_default_resblks(xfs_mount_t *mp);
+uint64_t xfs_default_resblks(struct xfs_mount *mp,
+ enum xfs_free_counter ctr);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);
@@ -519,34 +705,74 @@ extern void xfs_unmountfs(xfs_mount_t *);
*/
#define XFS_FDBLOCKS_BATCH 1024
+uint64_t xfs_freecounter_unavailable(struct xfs_mount *mp,
+ enum xfs_free_counter ctr);
+
/*
- * Estimate the amount of free space that is not available to userspace and is
- * not explicitly reserved from the incore fdblocks. This includes:
- *
- * - The minimum number of blocks needed to support splitting a bmap btree
- * - The blocks currently in use by the freespace btrees because they record
- * the actual blocks that will fill per-AG metadata space reservations
+ * Sum up the freecount, but never return negative values.
*/
-static inline uint64_t
-xfs_fdblocks_unavailable(
- struct xfs_mount *mp)
+static inline s64 xfs_sum_freecounter(struct xfs_mount *mp,
+ enum xfs_free_counter ctr)
+{
+ return percpu_counter_sum_positive(&mp->m_free[ctr].count);
+}
+
+/*
+ * Same as above, but does return negative values. Mostly useful for
+ * special cases like repair and tracing.
+ */
+static inline s64 xfs_sum_freecounter_raw(struct xfs_mount *mp,
+ enum xfs_free_counter ctr)
+{
+ return percpu_counter_sum(&mp->m_free[ctr].count);
+}
+
+/*
+ * This just provides an estimate without the cpu-local updates; use
+ * xfs_sum_freecounter for the exact value.
+ */
+static inline s64 xfs_estimate_freecounter(struct xfs_mount *mp,
+ enum xfs_free_counter ctr)
+{
+ return percpu_counter_read_positive(&mp->m_free[ctr].count);
+}
+
+static inline int xfs_compare_freecounter(struct xfs_mount *mp,
+ enum xfs_free_counter ctr, s64 rhs, s32 batch)
+{
+ return __percpu_counter_compare(&mp->m_free[ctr].count, rhs, batch);
+}
+
+static inline void xfs_set_freecounter(struct xfs_mount *mp,
+ enum xfs_free_counter ctr, uint64_t val)
{
- return mp->m_alloc_set_aside + atomic64_read(&mp->m_allocbt_blks);
+ percpu_counter_set(&mp->m_free[ctr].count, val);
}
-int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
- int64_t delta, bool rsvd);
+int xfs_dec_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
+ uint64_t delta, bool rsvd);
+void xfs_add_freecounter(struct xfs_mount *mp, enum xfs_free_counter ctr,
+ uint64_t delta);
+
+static inline int xfs_dec_fdblocks(struct xfs_mount *mp, uint64_t delta,
+ bool reserved)
+{
+ return xfs_dec_freecounter(mp, XC_FREE_BLOCKS, delta, reserved);
+}
-static inline int
-xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved)
+static inline void xfs_add_fdblocks(struct xfs_mount *mp, uint64_t delta)
{
- return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved);
+ xfs_add_freecounter(mp, XC_FREE_BLOCKS, delta);
}
-static inline int
-xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
+static inline int xfs_dec_frextents(struct xfs_mount *mp, uint64_t delta)
{
- return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false);
+ return xfs_dec_freecounter(mp, XC_FREE_RTEXTENTS, delta, false);
+}
+
+static inline void xfs_add_frextents(struct xfs_mount *mp, uint64_t delta)
+{
+ xfs_add_freecounter(mp, XC_FREE_RTEXTENTS, delta);
}
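
A sketch of the intended calling pattern for these wrappers, assuming the usual XFS semantics where the boolean lets the caller dip into the reserve pool: xfs_dec_fdblocks() takes blocks out of the free pool and returns an error (typically -ENOSPC) on failure, and xfs_add_fdblocks() gives them back if the rest of the operation fails. The surrounding work is a hypothetical placeholder, not the kernel's actual transaction code:

	/* Hedged usage sketch; do_more_work() is a hypothetical placeholder. */
	static int example_alloc_blocks(struct xfs_mount *mp, uint64_t nblocks,
			bool can_use_resv)
	{
		int error;

		error = xfs_dec_fdblocks(mp, nblocks, can_use_resv);
		if (error)
			return error;			/* typically -ENOSPC */

		error = do_more_work(mp, nblocks);	/* hypothetical */
		if (error)
			xfs_add_fdblocks(mp, nblocks);	/* undo the reservation */
		return error;
	}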
extern int xfs_readsb(xfs_mount_t *, int);
@@ -566,6 +792,14 @@ struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
void xfs_force_summary_recalc(struct xfs_mount *mp);
int xfs_add_incompat_log_feature(struct xfs_mount *mp, uint32_t feature);
bool xfs_clear_incompat_log_features(struct xfs_mount *mp);
-void xfs_mod_delalloc(struct xfs_mount *mp, int64_t delta);
+void xfs_mod_delalloc(struct xfs_inode *ip, int64_t data_delta,
+ int64_t ind_delta);
+static inline void xfs_mod_sb_delalloc(struct xfs_mount *mp, int64_t delta)
+{
+ percpu_counter_add(&mp->m_delalloc_blks, delta);
+}
+
+int xfs_set_max_atomic_write_opt(struct xfs_mount *mp,
+ unsigned long long new_max_bytes);
#endif /* __XFS_MOUNT_H__ */