Diffstat (limited to 'fs/btrfs/extent_io.c')
 fs/btrfs/extent_io.c | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b2eeee236e80..8b4bef05e222 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -909,7 +909,7 @@ static int attach_extent_buffer_folio(struct extent_buffer *eb,
* will not race with any other ebs.
*/
if (folio->mapping)
- lockdep_assert_held(&folio->mapping->private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
if (fs_info->nodesize >= PAGE_SIZE) {
if (!folio_test_private(folio))
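Every hunk below is the same one-token substitution: the patch follows the VFS-side rename of struct address_space's private members, in which private_lock became i_private_lock, so btrfs now spells the lock the same way the VFS does. As a hedged sketch of the assertion pattern used above (the helper name is illustrative, not part of this patch):

	/*
	 * lockdep_assert_held() compiles to nothing without CONFIG_LOCKDEP;
	 * with it enabled, the kernel warns if the caller does not hold
	 * the lock.
	 */
	static void assert_eb_folio_locked(struct folio *folio)
	{
		if (folio->mapping)
			lockdep_assert_held(&folio->mapping->i_private_lock);
	}

The NULL check matters because an unmapped eb folio has no folio->mapping to dereference.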
@@ -1784,16 +1784,16 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
* Take private lock to ensure the subpage won't be detached
* in the meantime.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
break;
}
spin_lock_irqsave(&subpage->lock, flags);
if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
bit_start++;
continue;
}
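submit_eb_subpage() probes one nodesize slot of the per-folio dirty bitmap per iteration, and each probe nests two locks. A hedged sketch of that nesting, using the names from the hunk above (the surrounding loop is elided):

	/*
	 * i_private_lock keeps the btrfs_subpage structure attached to the
	 * folio; the IRQ-safe subpage->lock protects the bitmaps inside it,
	 * which can also be updated from end-io context.
	 */
	spin_lock(&mapping->i_private_lock);
	if (folio_test_private(folio)) {
		spin_lock_irqsave(&subpage->lock, flags);
		dirty = test_bit(bit_start + fs_info->subpage_info->dirty_offset,
				 subpage->bitmaps);
		spin_unlock_irqrestore(&subpage->lock, flags);
	}
	spin_unlock(&mapping->i_private_lock);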
@@ -1807,7 +1807,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
*/
eb = find_extent_buffer_nolock(fs_info, start);
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
/*
* The eb has already reached 0 refs thus find_extent_buffer()
@@ -1860,9 +1860,9 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
return submit_eb_subpage(page, wbc);
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
@@ -1873,16 +1873,16 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
* crashing the machine for something we can survive anyway.
*/
if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
if (eb == ctx->eb) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
if (!ret)
return 0;
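The point of holding i_private_lock across the lookup is that the eb stored in folio private cannot be detached and freed between the lookup and the refcount bump, while atomic_inc_not_zero() refuses to resurrect an eb whose refcount already hit zero. Distilled into a hedged sketch (the framing is illustrative, not the patch's code):

	spin_lock(&mapping->i_private_lock);
	eb = folio_get_private(folio);
	if (!eb || !atomic_inc_not_zero(&eb->refs)) {
		/* eb already detached or dying: nothing to submit */
		spin_unlock(&mapping->i_private_lock);
		return 0;
	}
	spin_unlock(&mapping->i_private_lock);
	/* a reference is now held, so eb stays valid after the unlock */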
@@ -3218,7 +3218,7 @@ static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *foli
{
struct btrfs_subpage *subpage;
- lockdep_assert_held(&folio->mapping->private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
if (folio_test_private(folio)) {
subpage = folio_get_private(folio);
@@ -3241,14 +3241,14 @@ static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *f
/*
* For mapped eb, we're going to change the folio private, which should
- * be done under the private_lock.
+ * be done under the i_private_lock.
*/
if (mapped)
- spin_lock(&folio->mapping->private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
if (mapped)
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return;
}
@@ -3268,7 +3268,7 @@ static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *f
folio_detach_private(folio);
}
if (mapped)
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return;
}
@@ -3291,7 +3291,7 @@ static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *f
if (!folio_range_has_eb(fs_info, folio))
btrfs_detach_subpage(fs_info, folio);
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
}
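detach_extent_buffer_folio() only takes the lock for mapped ebs: a folio on the btree inode's mapping can carry subpage state shared with other ebs, whereas a cloned or dummy eb owns its folios outright and nothing can race with it. A hedged sketch of that conditional pattern:

	if (mapped)
		spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_private(folio) && folio_get_private(folio) == (void *)eb)
		/* also drops the folio reference taken by folio_attach_private() */
		folio_detach_private(folio);
	if (mapped)
		spin_unlock(&folio->mapping->i_private_lock);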
/* Release all pages attached to the extent buffer */
@@ -3726,7 +3726,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
/*
* Preallocate folio private for subpage case, so that we won't
- * allocate memory with private_lock nor page lock hold.
+ * allocate memory with i_private_lock nor page lock hold.
*
* The memory will be freed by attach_extent_buffer_page() or freed
* manually if we exit earlier.
@@ -3789,7 +3789,7 @@ reallocate:
* and free the allocated page.
*/
folio = eb->folios[i];
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
/* Should not fail, as we have preallocated the memory */
ret = attach_extent_buffer_folio(eb, folio, prealloc);
ASSERT(!ret);
@@ -3803,7 +3803,7 @@ reallocate:
* Thus needs no special handling in error path.
*/
btrfs_folio_inc_eb_refs(fs_info, folio);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
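The ordering in alloc_extent_buffer() follows from the comment above: the subpage allocation can sleep, so it runs before any spinlock, and attach_extent_buffer_folio() then runs under i_private_lock where failure is not an option. A hedged sketch of the flow (btrfs_alloc_subpage() is existing btrfs API, but its exact use here is illustrative):

	/* sleepable allocation first, outside any spinlock */
	prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
	if (IS_ERR(prealloc))
		return ERR_CAST(prealloc);

	spin_lock(&mapping->i_private_lock);
	ret = attach_extent_buffer_folio(eb, folio, prealloc);	/* cannot fail now */
	ASSERT(!ret);
	btrfs_folio_inc_eb_refs(fs_info, folio);
	spin_unlock(&mapping->i_private_lock);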
@@ -4895,12 +4895,12 @@ static int try_release_subpage_extent_buffer(struct page *page)
* Finally to check if we have cleared folio private, as if we have
* released all ebs in the page, the folio private should be cleared now.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(page_folio(page)))
ret = 1;
else
ret = 0;
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return ret;
}
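The return convention matches the release-folio contract: 1 means the private state is gone and the page may be released, 0 means it is still busy. Since every attach and detach of folio private happens under i_private_lock, sampling the bit under the same lock gives a stable answer; a condensed sketch:

	spin_lock(&mapping->i_private_lock);
	ret = folio_test_private(folio) ? 0 : 1;	/* 1: safe to release */
	spin_unlock(&mapping->i_private_lock);
	return ret;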
@@ -4917,9 +4917,9 @@ int try_release_extent_buffer(struct page *page)
* We need to make sure nobody is changing folio private, as we rely on
* folio private as the pointer to extent buffer.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 1;
}
@@ -4934,10 +4934,10 @@ int try_release_extent_buffer(struct page *page)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 0;
}
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
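Worth noting is the lock ordering this function establishes: mapping->i_private_lock is the outer lock and eb->refs_lock the inner one. In the busy case both are dropped in reverse order; in the success path i_private_lock is released while refs_lock stays held, since with refs pinned at 1 nobody else can free the eb. A hedged sketch of that shape:

	spin_lock(&mapping->i_private_lock);	/* outer: pins folio private */
	spin_lock(&eb->refs_lock);		/* inner: pins refcount state */
	if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
		spin_unlock(&eb->refs_lock);
		spin_unlock(&mapping->i_private_lock);
		return 0;
	}
	/* keep refs_lock held: the eb can no longer be freed under us */
	spin_unlock(&mapping->i_private_lock);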
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,