Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/scrub/trace.h    |  2
-rw-r--r--  fs/xfs/xfs_file.c       |  6
-rw-r--r--  fs/xfs/xfs_inode.h      | 11
-rw-r--r--  fs/xfs/xfs_ioctl.c      |  2
-rw-r--r--  fs/xfs/xfs_iops.c       |  5
-rw-r--r--  fs/xfs/xfs_itable.c     |  8
-rw-r--r--  fs/xfs/xfs_itable.h     | 10
-rw-r--r--  fs/xfs/xfs_mount.c      | 19
-rw-r--r--  fs/xfs/xfs_trace.h      |  1
-rw-r--r--  fs/xfs/xfs_trans.c      |  2
-rw-r--r--  fs/xfs/xfs_zone_alloc.c | 42
11 files changed, 75 insertions, 33 deletions
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 1e6e9c10cea2..a8187281eb96 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -479,7 +479,7 @@ DECLARE_EVENT_CLASS(xchk_dqiter_class,
__field(xfs_exntst_t, state)
),
TP_fast_assign(
- __entry->dev = cursor->sc->ip->i_mount->m_super->s_dev;
+ __entry->dev = cursor->sc->mp->m_super->s_dev;
__entry->dqtype = cursor->dqtype;
__entry->ino = cursor->quota_ip->i_ino;
__entry->cur_id = cursor->id;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 55a304cb3aef..f96fbf5c54c9 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1101,9 +1101,6 @@ xfs_file_write_iter(
if (xfs_is_shutdown(ip->i_mount))
return -EIO;
- if (IS_DAX(inode))
- return xfs_file_dax_write(iocb, from);
-
if (iocb->ki_flags & IOCB_ATOMIC) {
if (ocount < xfs_get_atomic_write_min(ip))
return -EINVAL;
@@ -1116,6 +1113,9 @@ xfs_file_write_iter(
return ret;
}
+ if (IS_DAX(inode))
+ return xfs_file_dax_write(iocb, from);
+
if (iocb->ki_flags & IOCB_DIRECT) {
/*
* Allow a directio write to fall back to a buffered
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 07fbdcc4cbf5..bd6d33557194 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -358,9 +358,20 @@ static inline bool xfs_inode_has_bigrtalloc(const struct xfs_inode *ip)
static inline bool xfs_inode_can_hw_atomic_write(const struct xfs_inode *ip)
{
+ if (IS_DAX(VFS_IC(ip)))
+ return false;
+
return xfs_inode_buftarg(ip)->bt_awu_max > 0;
}
+static inline bool xfs_inode_can_sw_atomic_write(const struct xfs_inode *ip)
+{
+ if (IS_DAX(VFS_IC(ip)))
+ return false;
+
+ return xfs_can_sw_atomic_write(ip->i_mount);
+}
+
/*
* In-core inode flags.
*/
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index fe1f74a3b6a3..e1051a530a50 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -219,7 +219,7 @@ xfs_bulk_ireq_setup(
else if (XFS_INO_TO_AGNO(mp, breq->startino) < hdr->agno)
return -EINVAL;
- breq->flags |= XFS_IBULK_SAME_AG;
+ breq->iwalk_flags |= XFS_IWALK_SAME_AG;
/* Asking for an inode past the end of the AG? We're done! */
if (XFS_INO_TO_AGNO(mp, breq->startino) > hdr->agno)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 149b5460fbfd..603effabe1ee 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -616,7 +616,8 @@ xfs_get_atomic_write_min(
* write of exactly one single fsblock if the bdev will make that
* guarantee for us.
*/
- if (xfs_inode_can_hw_atomic_write(ip) || xfs_can_sw_atomic_write(mp))
+ if (xfs_inode_can_hw_atomic_write(ip) ||
+ xfs_inode_can_sw_atomic_write(ip))
return mp->m_sb.sb_blocksize;
return 0;
@@ -633,7 +634,7 @@ xfs_get_atomic_write_max(
* write of exactly one single fsblock if the bdev will make that
* guarantee for us.
*/
- if (!xfs_can_sw_atomic_write(mp)) {
+ if (!xfs_inode_can_sw_atomic_write(ip)) {
if (xfs_inode_can_hw_atomic_write(ip))
return mp->m_sb.sb_blocksize;
return 0;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c8c9b8d8309f..2aa37a4d2706 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -307,7 +307,6 @@ xfs_bulkstat(
.breq = breq,
};
struct xfs_trans *tp;
- unsigned int iwalk_flags = 0;
int error;
if (breq->idmap != &nop_mnt_idmap) {
@@ -328,10 +327,7 @@ xfs_bulkstat(
* locking abilities to detect cycles in the inobt without deadlocking.
*/
tp = xfs_trans_alloc_empty(breq->mp);
- if (breq->flags & XFS_IBULK_SAME_AG)
- iwalk_flags |= XFS_IWALK_SAME_AG;
-
- error = xfs_iwalk(breq->mp, tp, breq->startino, iwalk_flags,
+ error = xfs_iwalk(breq->mp, tp, breq->startino, breq->iwalk_flags,
xfs_bulkstat_iwalk, breq->icount, &bc);
xfs_trans_cancel(tp);
kfree(bc.buf);
@@ -457,7 +453,7 @@ xfs_inumbers(
* locking abilities to detect cycles in the inobt without deadlocking.
*/
tp = xfs_trans_alloc_empty(breq->mp);
- error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->flags,
+ error = xfs_inobt_walk(breq->mp, tp, breq->startino, breq->iwalk_flags,
xfs_inumbers_walk, breq->icount, &ic);
xfs_trans_cancel(tp);
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index f10e8f8f2335..2d0612f14d6e 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -13,17 +13,15 @@ struct xfs_ibulk {
xfs_ino_t startino; /* start with this inode */
unsigned int icount; /* number of elements in ubuffer */
unsigned int ocount; /* number of records returned */
- unsigned int flags; /* see XFS_IBULK_FLAG_* */
+ unsigned int flags; /* XFS_IBULK_FLAG_* */
+ unsigned int iwalk_flags; /* XFS_IWALK_FLAG_* */
};
-/* Only iterate within the same AG as startino */
-#define XFS_IBULK_SAME_AG (1U << 0)
-
/* Fill out the bs_extents64 field if set. */
-#define XFS_IBULK_NREXT64 (1U << 1)
+#define XFS_IBULK_NREXT64 (1U << 0)
/* Signal that we can return metadata directories. */
-#define XFS_IBULK_METADIR (1U << 2)
+#define XFS_IBULK_METADIR (1U << 1)
/*
* Advance the user buffer pointer by one record of the given size. If the
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 2133fbaf1766..dc32c5e34d81 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -779,6 +779,25 @@ xfs_set_max_atomic_write_opt(
return -EINVAL;
}
+ if (xfs_has_reflink(mp))
+ goto set_limit;
+
+ if (new_max_fsbs == 1) {
+ if (mp->m_ddev_targp->bt_awu_max ||
+ (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_awu_max)) {
+ } else {
+ xfs_warn(mp,
+ "cannot support atomic writes of size %lluk with no reflink or HW support",
+ new_max_bytes >> 10);
+ return -EINVAL;
+ }
+ } else {
+ xfs_warn(mp,
+ "cannot support atomic writes of size %lluk with no reflink support",
+ new_max_bytes >> 10);
+ return -EINVAL;
+ }
+
set_limit:
error = xfs_calc_atomic_write_reservation(mp, new_max_fsbs);
if (error) {
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index e1794e3e3156..ac344e42846c 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -455,6 +455,7 @@ DEFINE_EVENT(xfs_zone_alloc_class, name, \
xfs_extlen_t len), \
TP_ARGS(oz, rgbno, len))
DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks);
+DEFINE_ZONE_ALLOC_EVENT(xfs_zone_skip_blocks);
DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks);
TRACE_EVENT(xfs_zone_gc_select_victim,
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index ece374d622b3..575e7028f423 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -253,8 +253,8 @@ xfs_trans_alloc(
* by doing GFP_KERNEL allocations inside sb_start_intwrite().
*/
retry:
- WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
tp = __xfs_trans_alloc(mp, flags);
+ WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
error = xfs_trans_reserve(tp, resp, blocks, rtextents);
if (error == -ENOSPC && want_retry) {
xfs_trans_cancel(tp);
diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
index 33f7eee521a8..f8bd6d741755 100644
--- a/fs/xfs/xfs_zone_alloc.c
+++ b/fs/xfs/xfs_zone_alloc.c
@@ -166,10 +166,9 @@ xfs_open_zone_mark_full(
static void
xfs_zone_record_blocks(
struct xfs_trans *tp,
- xfs_fsblock_t fsbno,
- xfs_filblks_t len,
struct xfs_open_zone *oz,
- bool used)
+ xfs_fsblock_t fsbno,
+ xfs_filblks_t len)
{
struct xfs_mount *mp = tp->t_mountp;
struct xfs_rtgroup *rtg = oz->oz_rtg;
@@ -179,18 +178,37 @@ xfs_zone_record_blocks(
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP);
- if (used) {
- rmapip->i_used_blocks += len;
- ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
- } else {
- xfs_add_frextents(mp, len);
- }
+ rmapip->i_used_blocks += len;
+ ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg));
oz->oz_written += len;
if (oz->oz_written == rtg_blocks(rtg))
xfs_open_zone_mark_full(oz);
xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE);
}
+/*
+ * Called for blocks that have been written to disk, but not actually linked to
+ * an inode, which can happen when garbage collection races with user data
+ * writes to a file.
+ */
+static void
+xfs_zone_skip_blocks(
+ struct xfs_open_zone *oz,
+ xfs_filblks_t len)
+{
+ struct xfs_rtgroup *rtg = oz->oz_rtg;
+
+ trace_xfs_zone_skip_blocks(oz, 0, len);
+
+ xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
+ oz->oz_written += len;
+ if (oz->oz_written == rtg_blocks(rtg))
+ xfs_open_zone_mark_full(oz);
+ xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP);
+
+ xfs_add_frextents(rtg_mount(rtg), len);
+}
+
static int
xfs_zoned_map_extent(
struct xfs_trans *tp,
@@ -250,8 +268,7 @@ xfs_zoned_map_extent(
}
}
- xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
- true);
+ xfs_zone_record_blocks(tp, oz, new->br_startblock, new->br_blockcount);
/* Map the new blocks into the data fork. */
xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new);
@@ -259,8 +276,7 @@ xfs_zoned_map_extent(
skip:
trace_xfs_reflink_cow_remap_skip(ip, new);
- xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz,
- false);
+ xfs_zone_skip_blocks(oz, new->br_blockcount);
return 0;
}