Diffstat (limited to 'fs/gfs2/aops.c')
 -rw-r--r--  fs/gfs2/aops.c | 72
 1 file changed, 34 insertions(+), 38 deletions(-)
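The diff below converts several address-space operations in fs/gfs2/aops.c from the old struct page interfaces to struct folio: gfs2_write_jdata_batch() estimates transaction size in bytes rather than pages, stuffed_readpage() and gfs2_internal_read() operate on folios directly, and the open-coded kmap/memcpy/memset sequences give way to folio helpers. Brief notes and illustrative sketches follow the relevant hunks.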
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 6b060fc9e260..9611bfceda4b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -155,7 +155,7 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+ if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
goto out;
if (folio_test_checked(folio) || current->journal_info)
goto out_ignore;
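A note on this first hunk: the withdraw assertion now tests the glock state field directly. Assuming gfs2_glock_is_held_excl() was the thin inline historically defined in fs/gfs2/incore.h (an assumption, not something shown in this diff), the open-coded test is equivalent:

/* Presumed shape of the dropped helper; shown only to illustrate
 * that the open-coded gl_state test matches it. */
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}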
@@ -214,12 +214,12 @@ static int gfs2_write_jdata_batch(struct address_space *mapping,
unsigned nrblocks;
int i;
int ret;
- int nr_pages = 0;
+ size_t size = 0;
int nr_folios = folio_batch_count(fbatch);
for (i = 0; i < nr_folios; i++)
- nr_pages += folio_nr_pages(fbatch->folios[i]);
- nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+ size += folio_size(fbatch->folios[i]);
+ nrblocks = size >> inode->i_blkbits;
ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
if (ret < 0)
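The hunk above changes the transaction-size estimate from a page count to a byte count: folio_size() is summed over the batch and the total is shifted down by i_blkbits to get filesystem blocks. When every folio is a single page this is identical to the old nr_pages * (PAGE_SIZE >> inode->i_blkbits), but it stays correct for large folios. A minimal userspace sketch of the arithmetic, with hypothetical folio sizes and a 4 KiB block size assumed:

#include <stdio.h>

int main(void)
{
	unsigned int i_blkbits = 12;                   /* 4 KiB blocks (assumption) */
	size_t folio_sizes[] = { 4096, 16384, 65536 }; /* hypothetical batch */
	size_t size = 0;
	unsigned int i;

	for (i = 0; i < 3; i++)
		size += folio_sizes[i];    /* sum folio_size() over the batch */

	/* 86016 bytes >> 12 == 21 blocks */
	printf("%zu bytes -> %zu blocks\n", size, size >> i_blkbits);
	return 0;
}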
@@ -403,27 +403,27 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
}
/**
- * stuffed_readpage - Fill in a Linux page with stuffed file data
+ * stuffed_readpage - Fill in a Linux folio with stuffed file data
* @ip: the inode
- * @page: the page
+ * @folio: the folio
*
* Returns: errno
*/
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
{
struct buffer_head *dibh;
- u64 dsize = i_size_read(&ip->i_inode);
- void *kaddr;
+ size_t i_size = i_size_read(&ip->i_inode);
+ void *data;
int error;
/*
* Due to the order of unstuffing files and ->fault(), we can be
- * asked for a zero page in the case of a stuffed file being extended,
+ * asked for a zero folio in the case of a stuffed file being extended,
* so we need to supply one here. It doesn't happen often.
*/
- if (unlikely(page->index)) {
- zero_user(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (unlikely(folio->index)) {
+ folio_zero_range(folio, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
return 0;
}
@@ -431,13 +431,11 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
if (error)
return error;
- kaddr = kmap_local_page(page);
- memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
- memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
- kunmap_local(kaddr);
- flush_dcache_page(page);
+ data = dibh->b_data + sizeof(struct gfs2_dinode);
+ memcpy_to_folio(folio, 0, data, i_size);
+ folio_zero_range(folio, i_size, folio_size(folio) - i_size);
brelse(dibh);
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
return 0;
}
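The stuffed_readpage() hunks replace the open-coded kmap_local_page()/memcpy()/memset()/kunmap_local()/flush_dcache_page() sequence with memcpy_to_folio() and folio_zero_range(), which handle the temporary mapping and dcache flushing internally and, unlike the old code, also cope with folios larger than a page. A rough sketch of what such a helper has to do internally (an illustration, not the actual mm implementation):

static void memcpy_to_folio_sketch(struct folio *folio, size_t offset,
				   const char *from, size_t len)
{
	while (len > 0) {
		/* map at most one page at a time; a highmem folio cannot
		 * be mapped as one contiguous range */
		size_t chunk = min_t(size_t, len,
				     PAGE_SIZE - offset_in_page(offset));
		char *to = kmap_local_folio(folio, offset);

		memcpy(to, from, chunk);
		kunmap_local(to);
		from += chunk;
		offset += chunk;
		len -= chunk;
	}
	flush_dcache_folio(folio);
}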
@@ -458,7 +456,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
(i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
error = iomap_read_folio(folio, &gfs2_iomap_ops);
} else if (gfs2_is_stuffed(ip)) {
- error = stuffed_readpage(ip, &folio->page);
+ error = stuffed_readpage(ip, folio);
folio_unlock(folio);
} else {
error = mpage_read_folio(folio, gfs2_block_map);
@@ -479,31 +477,29 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
*
*/
-int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
- unsigned size)
+ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+ size_t size)
{
struct address_space *mapping = ip->i_inode.i_mapping;
unsigned long index = *pos >> PAGE_SHIFT;
- unsigned offset = *pos & (PAGE_SIZE - 1);
- unsigned copied = 0;
- unsigned amt;
- struct page *page;
+ size_t copied = 0;
do {
- page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
- if (IS_ERR(page)) {
- if (PTR_ERR(page) == -EINTR)
+ size_t offset, chunk;
+ struct folio *folio;
+
+ folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
+ if (IS_ERR(folio)) {
+ if (PTR_ERR(folio) == -EINTR)
continue;
- return PTR_ERR(page);
+ return PTR_ERR(folio);
}
- amt = size - copied;
- if (offset + size > PAGE_SIZE)
- amt = PAGE_SIZE - offset;
- memcpy_from_page(buf + copied, page, offset, amt);
- put_page(page);
- copied += amt;
- index++;
- offset = 0;
+ offset = *pos + copied - folio_pos(folio);
+ chunk = min(size - copied, folio_size(folio) - offset);
+ memcpy_from_folio(buf + copied, folio, offset, chunk);
+ index = folio_next_index(folio);
+ folio_put(folio);
+ copied += chunk;
} while(copied < size);
(*pos) += size;
return size;
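In the rewritten copy loop, offset = *pos + copied - folio_pos(folio) locates the copy position within whatever folio the page cache returned (which may span several pages), chunk clamps the copy to both the remaining request and the end of the folio, and index = folio_next_index(folio) skips past the whole folio rather than advancing one page at a time; the return and size types also widen from unsigned to ssize_t/size_t. A self-contained userspace analogue of that arithmetic (all names here are hypothetical, not kernel API):

#include <stdio.h>
#include <string.h>

struct fake_folio {
	long long pos;      /* byte offset in the file, as folio_pos() */
	size_t size;        /* as folio_size() */
	const char *data;
};

/* Copy `size` bytes starting at byte `pos` out of an ordered run of
 * variable-sized "folios", mirroring the loop in gfs2_internal_read(). */
static size_t copy_range(const struct fake_folio *f, int n,
			 char *buf, long long pos, size_t size)
{
	size_t copied = 0;
	int i;

	for (i = 0; i < n && copied < size; i++) {
		size_t offset, chunk;

		if (pos + copied >= f[i].pos + (long long)f[i].size)
			continue;       /* request starts past this folio */
		offset = pos + copied - f[i].pos;
		chunk = size - copied;
		if (chunk > f[i].size - offset)
			chunk = f[i].size - offset;   /* min(), as in the patch */
		memcpy(buf + copied, f[i].data + offset, chunk);
		copied += chunk;
	}
	return copied;
}

int main(void)
{
	const struct fake_folio folios[] = {
		{ 0, 8, "AAAABBBB" },   /* a two-"page" folio */
		{ 8, 4, "CCCC" },
	};
	char buf[8] = "";

	size_t n = copy_range(folios, 2, buf, 6, 5);  /* 5 bytes at pos 6 */
	printf("copied %zu bytes: %.5s\n", n, buf);   /* -> BBCCC */
	return 0;
}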