Diffstat (limited to 'fs/xfs/xfs_zone_gc.c')
-rw-r--r--	fs/xfs/xfs_zone_gc.c	47
1 file changed, 30 insertions(+), 17 deletions(-)
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index c5136ea9bb1d..064cd1a857a0 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -162,18 +162,36 @@ struct xfs_zone_gc_data {
/*
* We aim to keep enough zones free in stock to fully use the open zone limit
- * for data placement purposes.
+ * for data placement purposes. Additionally, the m_zonegc_low_space tunable
+ * can be set to make sure a fraction of the unused blocks are available for
+ * writing.
*/
bool
xfs_zoned_need_gc(
struct xfs_mount *mp)
{
+ s64 available, free, threshold;
+ s32 remainder;
+
if (!xfs_group_marked(mp, XG_TYPE_RTG, XFS_RTG_RECLAIMABLE))
return false;
- if (xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE) <
+
+ available = xfs_estimate_freecounter(mp, XC_FREE_RTAVAILABLE);
+
+ if (available <
mp->m_groups[XG_TYPE_RTG].blocks *
(mp->m_max_open_zones - XFS_OPEN_GC_ZONES))
return true;
+
+ free = xfs_estimate_freecounter(mp, XC_FREE_RTEXTENTS);
+
+ threshold = div_s64_rem(free, 100, &remainder);
+ threshold = threshold * mp->m_zonegc_low_space +
+ remainder * div_s64(mp->m_zonegc_low_space, 100);
+
+ if (available < threshold)
+ return true;
+
return false;
}
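The threshold arithmetic above is written to stay within s64 for any free-space count: rather than computing free * m_zonegc_low_space / 100 directly, which could overflow for very large filesystems, it splits free into a quotient and remainder of 100 first. A minimal userspace sketch of the same idea, assuming the tunable is a percentage in the range 0..100:

    #include <stdint.h>

    static int64_t pct_threshold(int64_t free, int64_t pct)
    {
            int64_t quot = free / 100;      /* div_s64_rem() quotient */
            int64_t rem  = free % 100;      /* div_s64_rem() remainder */

            /* rem * (pct / 100) only contributes when pct == 100,
             * making the result exact at both ends of the range */
            return quot * pct + rem * (pct / 100);
    }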
@@ -272,8 +290,6 @@ xfs_zone_gc_query_cb(
return 0;
}
-#define cmp_int(l, r) ((l > r) - (l < r))
-
static int
xfs_zone_gc_rmap_rec_cmp(
const void *a,
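The open-coded cmp_int() macro is dropped here; an equivalent helper is presumably available from a shared kernel header now, making the local definition a duplicate. The (l > r) - (l < r) idiom it encodes returns -1, 0, or 1 without the overflow risk of a plain subtraction, which is why it is the standard form for sort comparators such as xfs_zone_gc_rmap_rec_cmp(). A sketch of the idiom:

    /* three-way compare: -1 if l < r, 0 if equal, 1 if l > r */
    static int cmp_u64(uint64_t l, uint64_t r)
    {
            return (l > r) - (l < r);
    }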
@@ -312,10 +328,7 @@ xfs_zone_gc_query(
iter->rec_idx = 0;
iter->rec_count = 0;
- error = xfs_trans_alloc_empty(mp, &tp);
- if (error)
- return error;
-
+ tp = xfs_trans_alloc_empty(mp);
xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP);
cur = xfs_rtrmapbt_init_cursor(tp, rtg);
error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
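xfs_trans_alloc_empty() now returns the transaction directly instead of an error code, which lets callers like xfs_zone_gc_query() drop their error-handling boilerplate; presumably allocating an empty transaction can no longer fail. The calling convention before and after, reconstructed from the removed lines above:

    /* old: could (nominally) fail */
    error = xfs_trans_alloc_empty(mp, &tp);
    if (error)
            return error;

    /* new: infallible, returns the transaction */
    tp = xfs_trans_alloc_empty(mp);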
@@ -517,8 +530,7 @@ xfs_zone_gc_steal_open(
spin_lock(&zi->zi_open_zones_lock);
list_for_each_entry(oz, &zi->zi_open_zones, oz_entry) {
- if (!found ||
- oz->oz_write_pointer < found->oz_write_pointer)
+ if (!found || oz->oz_allocated < found->oz_allocated)
found = oz;
}
@@ -568,7 +580,7 @@ xfs_zone_gc_ensure_target(
{
struct xfs_open_zone *oz = mp->m_zone_info->zi_open_gc_zone;
- if (!oz || oz->oz_write_pointer == rtg_blocks(oz->oz_rtg))
+ if (!oz || oz->oz_allocated == rtg_blocks(oz->oz_rtg))
return xfs_zone_gc_select_target(mp);
return oz;
}
@@ -589,7 +601,7 @@ xfs_zone_gc_space_available(
oz = xfs_zone_gc_ensure_target(data->mp);
if (!oz)
return false;
- return oz->oz_write_pointer < rtg_blocks(oz->oz_rtg) &&
+ return oz->oz_allocated < rtg_blocks(oz->oz_rtg) &&
xfs_zone_gc_scratch_available(data);
}
@@ -631,7 +643,7 @@ xfs_zone_gc_alloc_blocks(
*/
spin_lock(&mp->m_sb_lock);
*count_fsb = min(*count_fsb,
- rtg_blocks(oz->oz_rtg) - oz->oz_write_pointer);
+ rtg_blocks(oz->oz_rtg) - oz->oz_allocated);
*count_fsb = min3(*count_fsb,
mp->m_free[XC_FREE_RTEXTENTS].res_avail,
mp->m_free[XC_FREE_RTAVAILABLE].res_avail);
@@ -645,8 +657,8 @@ xfs_zone_gc_alloc_blocks(
*daddr = xfs_gbno_to_daddr(&oz->oz_rtg->rtg_group, 0);
*is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *daddr);
if (!*is_seq)
- *daddr += XFS_FSB_TO_BB(mp, oz->oz_write_pointer);
- oz->oz_write_pointer += *count_fsb;
+ *daddr += XFS_FSB_TO_BB(mp, oz->oz_allocated);
+ oz->oz_allocated += *count_fsb;
atomic_inc(&oz->oz_ref);
return oz;
}
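The hunks above rename oz_write_pointer to oz_allocated. On conventional (non-sequential) zones there is no device-managed write pointer, but the GC code still needs to track how many blocks it has handed out from each open zone; the new name fits both zone types. The xfs_zone_gc_alloc_blocks() hunk shows the split: for conventional zones the allocated count is folded into the target address, while sequential zones always write at the device's write pointer. A minimal sketch of the fullness check the rename clarifies, using a hypothetical helper name:

    /* a zone is exhausted once every block has been allocated,
     * whether or not the device tracks a write pointer for it */
    static inline bool xfs_zone_full(const struct xfs_open_zone *oz)
    {
            return oz->oz_allocated == rtg_blocks(oz->oz_rtg);
    }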
@@ -789,7 +801,8 @@ xfs_zone_gc_write_chunk(
{
struct xfs_zone_gc_data *data = chunk->data;
struct xfs_mount *mp = chunk->ip->i_mount;
- unsigned int folio_offset = chunk->bio.bi_io_vec->bv_offset;
+ phys_addr_t bvec_paddr =
+ bvec_phys(bio_first_bvec_all(&chunk->bio));
struct xfs_gc_bio *split_chunk;
if (chunk->bio.bi_status)
@@ -804,7 +817,7 @@ xfs_zone_gc_write_chunk(
bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE);
bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len,
- folio_offset);
+ offset_in_folio(chunk->scratch->folio, bvec_paddr));
while ((split_chunk = xfs_zone_gc_split_write(data, chunk)))
xfs_zone_gc_submit_write(data, split_chunk);
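The last two hunks stop caching bv_offset from the first bio_vec. With large folios, the offset inside a multi-page folio is not the same as the offset inside the bvec's page, so the rewrite derives the folio-relative offset from the physical address via bvec_phys() and offset_in_folio(), both existing kernel helpers. A sketch of the pattern with a hypothetical helper name, assuming the bio's data lives in the given folio (as the GC scratch folio does here):

    static size_t gc_bio_folio_offset(struct bio *bio, struct folio *folio)
    {
            /* physical address of the first byte the bio covers */
            phys_addr_t paddr = bvec_phys(bio_first_bvec_all(bio));

            /* distance of that byte from the start of the folio */
            return offset_in_folio(folio, paddr);
    }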