Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--   fs/ubifs/file.c   752
1 file changed, 363 insertions(+), 389 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5d2ffb1a45fc..c3265b8804f5 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * This file is part of UBIFS.
  *
  * Copyright (C) 2006-2008 Nokia Corporation.
  *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 51
- * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
  * Authors: Artem Bityutskiy (Битюцкий Артём)
  *          Adrian Hunter
  */
@@ -43,9 +31,9 @@
  * in the "sys_write -> alloc_pages -> direct reclaim path". So, in
  * 'ubifs_writepage()' we are only guaranteed that the page is locked.
  *
- * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
+ * Similarly, @i_mutex is not always locked in 'ubifs_read_folio()', e.g., the
  * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
- * ondemand_readahead -> readpage"). In case of readahead, @I_SYNC flag is not
+ * ondemand_readahead -> read_folio"). In case of readahead, @I_SYNC flag is not
  * set as well. However, UBIFS disables readahead.
  */

@@ -54,8 +42,8 @@
 #include <linux/slab.h>
 #include <linux/migrate.h>

-static int read_block(struct inode *inode, void *addr, unsigned int block,
-		      struct ubifs_data_node *dn)
+static int read_block(struct inode *inode, struct folio *folio, size_t offset,
+		      unsigned int block, struct ubifs_data_node *dn)
 {
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	int err, len, out_len;
@@ -67,7 +55,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 	if (err) {
 		if (err == -ENOENT)
 			/* Not found, so it must be a hole */
-			memset(addr, 0, UBIFS_BLOCK_SIZE);
+			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
 		return err;
 	}
@@ -79,15 +67,15 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,

 	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

-	if (ubifs_crypt_is_encrypted(inode)) {
+	if (IS_ENCRYPTED(inode)) {
 		err = ubifs_decrypt(inode, dn, &dlen, block);
 		if (err)
 			goto dump;
 	}

 	out_len = UBIFS_BLOCK_SIZE;
-	err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
-			       le16_to_cpu(dn->compr_type));
+	err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset,
+				     &out_len, le16_to_cpu(dn->compr_type));
 	if (err || len != out_len)
 		goto dump;

@@ -97,47 +85,45 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 	 * appending data). Ensure that the remainder is zeroed out.
 	 */
 	if (len < UBIFS_BLOCK_SIZE)
-		memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+		folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len);

 	return 0;

 dump:
 	ubifs_err(c, "bad data node (block %u, inode %lu)",
 		  block, inode->i_ino);
-	ubifs_dump_node(c, dn);
+	ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);
 	return -EINVAL;
 }

-static int do_readpage(struct page *page)
+static int do_readpage(struct folio *folio)
 {
-	void *addr;
 	int err = 0, i;
 	unsigned int block, beyond;
-	struct ubifs_data_node *dn;
-	struct inode *inode = page->mapping->host;
+	struct ubifs_data_node *dn = NULL;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	loff_t i_size = i_size_read(inode);
+	size_t offset = 0;

 	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
-		inode->i_ino, page->index, i_size, page->flags);
-	ubifs_assert(c, !PageChecked(page));
-	ubifs_assert(c, !PagePrivate(page));
+		inode->i_ino, folio->index, i_size, folio->flags.f);
+	ubifs_assert(c, !folio_test_checked(folio));
+	ubifs_assert(c, !folio->private);

-	addr = kmap(page);
-
-	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
 	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
 	if (block >= beyond) {
 		/* Reading beyond inode */
-		SetPageChecked(page);
-		memset(addr, 0, PAGE_SIZE);
+		folio_set_checked(folio);
+		folio_zero_range(folio, 0, folio_size(folio));
 		goto out;
 	}

 	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
 	if (!dn) {
 		err = -ENOMEM;
-		goto error;
+		goto out;
 	}

 	i = 0;
@@ -147,9 +133,9 @@ static int do_readpage(struct page *page)
 		if (block >= beyond) {
 			/* Reading beyond inode */
 			err = -ENOENT;
-			memset(addr, 0, UBIFS_BLOCK_SIZE);
+			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
 		} else {
-			ret = read_block(inode, addr, block, dn);
+			ret = read_block(inode, folio, offset, block, dn);
 			if (ret) {
 				err = ret;
 				if (err != -ENOENT)
@@ -159,42 +145,32 @@ static int do_readpage(struct page *page)
 				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

 				if (ilen && ilen < dlen)
-					memset(addr + ilen, 0, dlen - ilen);
+					folio_zero_range(folio, offset + ilen, dlen - ilen);
 			}
 		}
-		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+		if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
 			break;
 		block += 1;
-		addr += UBIFS_BLOCK_SIZE;
+		offset += UBIFS_BLOCK_SIZE;
 	}
+
 	if (err) {
 		struct ubifs_info *c = inode->i_sb->s_fs_info;
 		if (err == -ENOENT) {
 			/* Not found, so it must be a hole */
-			SetPageChecked(page);
+			folio_set_checked(folio);
 			dbg_gen("hole");
-			goto out_free;
+			err = 0;
+		} else {
+			ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
+				  folio->index, inode->i_ino, err);
 		}
-		ubifs_err(c, "cannot read page %lu of inode %lu, error %d",
-			  page->index, inode->i_ino, err);
-		goto error;
 	}

-out_free:
-	kfree(dn);
 out:
-	SetPageUptodate(page);
-	ClearPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
-	return 0;
-
-error:
 	kfree(dn);
-	ClearPageUptodate(page);
-	SetPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
+	if (!err)
+		folio_mark_uptodate(folio);
 	return err;
 }

@@ -217,7 +193,7 @@ static void release_new_page_budget(struct ubifs_info *c)
  * @c: UBIFS file-system description object
  *
  * This is a helper function which releases budget corresponding to the budget
- * of changing one one page of data which already exists on the flash media.
+ * of changing one page of data which already exists on the flash media.
  */
 static void release_existing_page_budget(struct ubifs_info *c)
 {
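A note on the arithmetic in the read_block()/do_readpage() hunks above: a folio is filled one UBIFS block at a time, with the first block number derived from folio->index and the in-folio offset advancing by UBIFS_BLOCK_SIZE; with large folios the block count scales by folio_order(). The following standalone model (userspace C, not part of the patch; 4096-byte blocks and pages are assumed and all input values are hypothetical) prints the block/offset pairs and marks blocks past EOF, which the real code zero-fills:

#include <stdio.h>
#include <stddef.h>

/* Assumed UBIFS constants: 4 KiB blocks, so one block per 4 KiB page. */
#define UBIFS_BLOCK_SHIFT		12
#define UBIFS_BLOCK_SIZE		(1 << UBIFS_BLOCK_SHIFT)
#define UBIFS_BLOCKS_PER_PAGE		1
#define UBIFS_BLOCKS_PER_PAGE_SHIFT	0

int main(void)
{
	unsigned long folio_index = 3;	/* hypothetical folio->index */
	unsigned int order = 2;		/* order-2 folio: 4 pages, 16 KiB */
	long long i_size = 20000;	/* hypothetical file size */

	/* First flash block backed by this folio, as in do_readpage(). */
	unsigned int block = folio_index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	/* First block past EOF ("beyond" in do_readpage()). */
	unsigned int beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	/* Blocks covered by one folio, scaled by its order. */
	int nblocks = UBIFS_BLOCKS_PER_PAGE << order;
	size_t offset = 0;
	int i;

	for (i = 0; i < nblocks; i++, block++, offset += UBIFS_BLOCK_SIZE)
		printf("block %u at folio offset %zu%s\n", block, offset,
		       block >= beyond ? " (past EOF: zero-filled)" : "");
	return 0;
}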
@@ -227,24 +203,23 @@ static void release_existing_page_budget(struct ubifs_info *c)
 }

 static int write_begin_slow(struct address_space *mapping,
-			    loff_t pos, unsigned len, struct page **pagep,
-			    unsigned flags)
+			    loff_t pos, unsigned len, struct folio **foliop)
 {
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	pgoff_t index = pos >> PAGE_SHIFT;
 	struct ubifs_budget_req req = { .new_page = 1 };
-	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
-	struct page *page;
+	int err, appending = !!(pos + len > inode->i_size);
+	struct folio *folio;

 	dbg_gen("ino %lu, pos %llu, len %u, i_size %lld",
 		inode->i_ino, pos, len, inode->i_size);

 	/*
-	 * At the slow path we have to budget before locking the page, because
-	 * budgeting may force write-back, which would wait on locked pages and
-	 * deadlock if we had the page locked. At this point we do not know
-	 * anything about the page, so assume that this is a new page which is
+	 * At the slow path we have to budget before locking the folio, because
+	 * budgeting may force write-back, which would wait on locked folios and
+	 * deadlock if we had the folio locked. At this point we do not know
+	 * anything about the folio, so assume that this is a new folio which is
 	 * written to a hole. This corresponds to largest budget. Later the
 	 * budget will be amended if this is not true.
 	 */
@@ -256,45 +231,43 @@ static int write_begin_slow(struct address_space *mapping,
 	if (unlikely(err))
 		return err;

-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (unlikely(!page)) {
+	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+			mapping_gfp_mask(mapping));
+	if (IS_ERR(folio)) {
 		ubifs_release_budget(c, &req);
-		return -ENOMEM;
+		return PTR_ERR(folio);
 	}

-	if (!PageUptodate(page)) {
-		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE)
-			SetPageChecked(page);
+	if (!folio_test_uptodate(folio)) {
+		if (pos == folio_pos(folio) && len >= folio_size(folio))
+			folio_set_checked(folio);
 		else {
-			err = do_readpage(page);
+			err = do_readpage(folio);
 			if (err) {
-				unlock_page(page);
-				put_page(page);
+				folio_unlock(folio);
+				folio_put(folio);
 				ubifs_release_budget(c, &req);
 				return err;
 			}
 		}
-
-		SetPageUptodate(page);
-		ClearPageError(page);
 	}

-	if (PagePrivate(page))
+	if (folio->private)
 		/*
-		 * The page is dirty, which means it was budgeted twice:
+		 * The folio is dirty, which means it was budgeted twice:
 		 *   o first time the budget was allocated by the task which
-		 *     made the page dirty and set the PG_private flag;
+		 *     made the folio dirty and set the private field;
 		 *   o and then we budgeted for it for the second time at the
 		 *     very beginning of this function.
 		 *
-		 * So what we have to do is to release the page budget we
+		 * So what we have to do is to release the folio budget we
 		 * allocated.
 		 */
 		release_new_page_budget(c);
-	else if (!PageChecked(page))
+	else if (!folio_test_checked(folio))
 		/*
-		 * We are changing a page which already exists on the media.
-		 * This means that changing the page does not make the amount
+		 * We are changing a folio which already exists on the media.
+		 * This means that changing the folio does not make the amount
 		 * of indexing information larger, and this part of the budget
 		 * which we have already acquired may be released.
 		 */
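The hunk above replaces grab_cache_page_write_begin() with __filemap_get_folio(), which also changes the failure convention from a NULL page to an ERR_PTR() value. A minimal kernel-style sketch of that idiom in isolation (illustrative only, not part of the patch; the function name is made up):

/* Sketch: the FGP_WRITEBEGIN idiom used above. FGP_WRITEBEGIN bundles
 * FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE, so the folio comes back
 * locked and stabilized, ready for a partial write. */
static int example_write_begin(struct address_space *mapping, pgoff_t index,
			       struct folio **foliop)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* ERR_PTR, never NULL */

	*foliop = folio;	/* caller must folio_unlock() + folio_put() */
	return 0;
}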
 	 */
@@ -317,32 +290,33 @@ static int write_begin_slow(struct address_space *mapping,
 		ubifs_release_dirty_inode_budget(c, ui);
 	}

-	*pagep = page;
+	*foliop = folio;
 	return 0;
 }

 /**
  * allocate_budget - allocate budget for 'ubifs_write_begin()'.
  * @c: UBIFS file-system description object
- * @page: page to allocate budget for
+ * @folio: folio to allocate budget for
  * @ui: UBIFS inode object the page belongs to
  * @appending: non-zero if the page is appended
  *
  * This is a helper function for 'ubifs_write_begin()' which allocates budget
  * for the operation. The budget is allocated differently depending on whether
  * this is appending, whether the page is dirty or not, and so on. This
- * function leaves the @ui->ui_mutex locked in case of appending. Returns zero
- * in case of success and %-ENOSPC in case of failure.
+ * function leaves the @ui->ui_mutex locked in case of appending.
+ *
+ * Returns: %0 in case of success and %-ENOSPC in case of failure.
  */
-static int allocate_budget(struct ubifs_info *c, struct page *page,
+static int allocate_budget(struct ubifs_info *c, struct folio *folio,
 			   struct ubifs_inode *ui, int appending)
 {
 	struct ubifs_budget_req req = { .fast = 1 };

-	if (PagePrivate(page)) {
+	if (folio->private) {
 		if (!appending)
 			/*
-			 * The page is dirty and we are not appending, which
+			 * The folio is dirty and we are not appending, which
 			 * means no budget is needed at all.
 			 */
 			return 0;
@@ -366,11 +340,11 @@ static int allocate_budget(struct ubifs_info *c, struct page *page,
 		 */
 		req.dirtied_ino = 1;
 	} else {
-		if (PageChecked(page))
+		if (folio_test_checked(folio))
 			/*
 			 * The page corresponds to a hole and does not
 			 * exist on the media. So changing it makes
-			 * make the amount of indexing information
+			 * the amount of indexing information
 			 * larger, and we have to budget for a new
 			 * page.
 			 */
@@ -430,17 +404,18 @@ static int allocate_budget(struct ubifs_info *c, struct page *page,
 * there is a plenty of flash space and the budget will be acquired quickly,
 * without forcing write-back. The slow path does not make this assumption.
 */
-static int ubifs_write_begin(struct file *file, struct address_space *mapping,
-			     loff_t pos, unsigned len, unsigned flags,
-			     struct page **pagep, void **fsdata)
+static int ubifs_write_begin(const struct kiocb *iocb,
+			     struct address_space *mapping,
+			     loff_t pos, unsigned len,
+			     struct folio **foliop, void **fsdata)
 {
 	struct inode *inode = mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = pos >> PAGE_SHIFT;
-	int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
+	int err, appending = !!(pos + len > inode->i_size);
 	int skipped_read = 0;
-	struct page *page;
+	struct folio *folio;

 	ubifs_assert(c, ubifs_inode(inode)->ui_size == inode->i_size);
 	ubifs_assert(c, !c->ro_media && !c->ro_mount);
@@ -449,13 +424,14 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 		return -EROFS;

 	/* Try out the fast-path part first */
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (unlikely(!page))
-		return -ENOMEM;
+	folio = __filemap_get_folio(mapping, index, FGP_WRITEBEGIN,
+			mapping_gfp_mask(mapping));
+	if (IS_ERR(folio))
+		return PTR_ERR(folio);

-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		/* The page is not loaded from the flash */
-		if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) {
+		if (pos == folio_pos(folio) && len >= folio_size(folio)) {
 			/*
 			 * We change whole page so no need to load it. But we
 			 * do not know whether this page exists on the media or
@@ -465,32 +441,27 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 			 * media. Thus, we are setting the @PG_checked flag
 			 * here.
 			 */
-			SetPageChecked(page);
+			folio_set_checked(folio);
 			skipped_read = 1;
 		} else {
-			err = do_readpage(page);
+			err = do_readpage(folio);
 			if (err) {
-				unlock_page(page);
-				put_page(page);
+				folio_unlock(folio);
+				folio_put(folio);
 				return err;
 			}
 		}
-
-		SetPageUptodate(page);
-		ClearPageError(page);
 	}

-	err = allocate_budget(c, page, ui, appending);
+	err = allocate_budget(c, folio, ui, appending);
 	if (unlikely(err)) {
 		ubifs_assert(c, err == -ENOSPC);
 		/*
 		 * If we skipped reading the page because we were going to
 		 * write all of it, then it is not up to date.
 		 */
-		if (skipped_read) {
-			ClearPageChecked(page);
-			ClearPageUptodate(page);
-		}
+		if (skipped_read)
+			folio_clear_checked(folio);
 		/*
 		 * Budgeting failed which means it would have to force
 		 * write-back but didn't, because we set the @fast flag in the
@@ -502,10 +473,10 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 			ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
 			mutex_unlock(&ui->ui_mutex);
 		}
-		unlock_page(page);
-		put_page(page);
+		folio_unlock(folio);
+		folio_put(folio);

-		return write_begin_slow(mapping, pos, len, pagep, flags);
+		return write_begin_slow(mapping, pos, len, foliop);
 	}

 	/*
@@ -514,22 +485,21 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 	 * with @ui->ui_mutex locked if we are appending pages, and unlocked
 	 * otherwise. This is an optimization (slightly hacky though).
 	 */
-	*pagep = page;
+	*foliop = folio;
 	return 0;
-
 }

 /**
  * cancel_budget - cancel budget.
  * @c: UBIFS file-system description object
- * @page: page to cancel budget for
+ * @folio: folio to cancel budget for
  * @ui: UBIFS inode object the page belongs to
  * @appending: non-zero if the page is appended
  *
  * This is a helper function for a page write operation. It unlocks the
  * @ui->ui_mutex in case of appending.
  */
-static void cancel_budget(struct ubifs_info *c, struct page *page,
+static void cancel_budget(struct ubifs_info *c, struct folio *folio,
 			  struct ubifs_inode *ui, int appending)
 {
 	if (appending) {
@@ -537,17 +507,18 @@ static void cancel_budget(struct ubifs_info *c, struct page *page,
 			ubifs_release_dirty_inode_budget(c, ui);
 		mutex_unlock(&ui->ui_mutex);
 	}
-	if (!PagePrivate(page)) {
-		if (PageChecked(page))
+	if (!folio->private) {
+		if (folio_test_checked(folio))
 			release_new_page_budget(c);
 		else
 			release_existing_page_budget(c);
 	}
 }

-static int ubifs_write_end(struct file *file, struct address_space *mapping,
-			   loff_t pos, unsigned len, unsigned copied,
-			   struct page *page, void *fsdata)
+static int ubifs_write_end(const struct kiocb *iocb,
+			   struct address_space *mapping, loff_t pos,
+			   unsigned len, unsigned copied,
+			   struct folio *folio, void *fsdata)
 {
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
@@ -556,44 +527,47 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 	int appending = !!(end_pos > inode->i_size);

 	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
-		inode->i_ino, pos, page->index, len, copied, inode->i_size);
+		inode->i_ino, pos, folio->index, len, copied, inode->i_size);

-	if (unlikely(copied < len && len == PAGE_SIZE)) {
+	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
 		/*
-		 * VFS copied less data to the page that it intended and
+		 * VFS copied less data to the folio than it intended and
 		 * declared in its '->write_begin()' call via the @len
-		 * argument. If the page was not up-to-date, and @len was
-		 * @PAGE_SIZE, the 'ubifs_write_begin()' function did
+		 * argument. If the folio was not up-to-date,
+		 * the 'ubifs_write_begin()' function did
 		 * not load it from the media (for optimization reasons). This
-		 * means that part of the page contains garbage. So read the
-		 * page now.
+		 * means that part of the folio contains garbage. So read the
+		 * folio now.
 		 */
 		dbg_gen("copied %d instead of %d, read page and repeat",
 			copied, len);
-		cancel_budget(c, page, ui, appending);
-		ClearPageChecked(page);
+		cancel_budget(c, folio, ui, appending);
+		folio_clear_checked(folio);

 		/*
 		 * Return 0 to force VFS to repeat the whole operation, or the
 		 * error code if 'do_readpage()' fails.
 		 */
-		copied = do_readpage(page);
+		copied = do_readpage(folio);
 		goto out;
 	}

+	if (len == folio_size(folio))
+		folio_mark_uptodate(folio);
+
-	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
+	if (!folio->private) {
+		folio_attach_private(folio, (void *)1);
 		atomic_long_inc(&c->dirty_pg_cnt);
-		__set_page_dirty_nobuffers(page);
+		filemap_dirty_folio(mapping, folio);
 	}

 	if (appending) {
 		i_size_write(inode, end_pos);
 		ui->ui_size = end_pos;
 		/*
-		 * Note, we do not set @I_DIRTY_PAGES (which means that the
-		 * inode has dirty pages), this has been done in
-		 * '__set_page_dirty_nobuffers()'.
+		 * We do not set @I_DIRTY_PAGES (which means that
+		 * the inode has dirty pages), this was done in
+		 * filemap_dirty_folio().
 		 */
 		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 		ubifs_assert(c, mutex_is_locked(&ui->ui_mutex));
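As the ubifs_write_end() hunk above shows, UBIFS never stores a real object in folio->private: the (void *)1 it attaches is only a marker meaning "this dirty folio already carries a space budget". A reduced kernel-style sketch of that idiom (illustrative, not part of the patch; the function names are made up):

/* Sketch: folio->private as a one-bit "budgeted" marker, as UBIFS uses it.
 * The stored value is never dereferenced, only tested for non-NULL. */
static void mark_folio_budgeted(struct address_space *mapping,
				struct folio *folio)
{
	if (!folio->private) {
		folio_attach_private(folio, (void *)1);	/* flag only */
		filemap_dirty_folio(mapping, folio);	/* account + tag dirty */
	}
}

static void clear_folio_budgeted(struct folio *folio)
{
	if (folio->private)
		folio_detach_private(folio);	/* returns the old value */
}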
@@ -601,49 +575,47 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 	}

 out:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	return copied;
 }

 /**
  * populate_page - copy data nodes into a page for bulk-read.
  * @c: UBIFS file-system description object
- * @page: page
+ * @folio: folio
  * @bu: bulk-read information
  * @n: next zbranch slot
  *
- * This function returns %0 on success and a negative error code on failure.
+ * Returns: %0 on success and a negative error code on failure.
  */
-static int populate_page(struct ubifs_info *c, struct page *page,
+static int populate_page(struct ubifs_info *c, struct folio *folio,
 			 struct bu_info *bu, int *n)
 {
 	int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	loff_t i_size = i_size_read(inode);
 	unsigned int page_block;
-	void *addr, *zaddr;
+	size_t offset = 0;
 	pgoff_t end_index;

 	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
-		inode->i_ino, page->index, i_size, page->flags);
-
-	addr = zaddr = kmap(page);
+		inode->i_ino, folio->index, i_size, folio->flags.f);

 	end_index = (i_size - 1) >> PAGE_SHIFT;
-	if (!i_size || page->index > end_index) {
+	if (!i_size || folio->index > end_index) {
 		hole = 1;
-		memset(addr, 0, PAGE_SIZE);
+		folio_zero_range(folio, 0, folio_size(folio));
 		goto out_hole;
 	}

-	page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	page_block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
 	while (1) {
 		int err, len, out_len, dlen;

 		if (nn >= bu->cnt) {
 			hole = 1;
-			memset(addr, 0, UBIFS_BLOCK_SIZE);
+			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
 		} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
 			struct ubifs_data_node *dn;

@@ -659,19 +631,21 @@ static int populate_page(struct ubifs_info *c, struct page *page,

 			dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
 			out_len = UBIFS_BLOCK_SIZE;

-			if (ubifs_crypt_is_encrypted(inode)) {
+			if (IS_ENCRYPTED(inode)) {
 				err = ubifs_decrypt(inode, dn, &dlen, page_block);
 				if (err)
 					goto out_err;
 			}

-			err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
-					       le16_to_cpu(dn->compr_type));
+			err = ubifs_decompress_folio(
+				c, &dn->data, dlen, folio, offset, &out_len,
+				le16_to_cpu(dn->compr_type));
 			if (err || len != out_len)
 				goto out_err;

 			if (len < UBIFS_BLOCK_SIZE)
-				memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+				folio_zero_range(folio, offset + len,
+						 UBIFS_BLOCK_SIZE - len);

 			nn += 1;
 			read = (i << UBIFS_BLOCK_SHIFT) + len;
@@ -680,39 +654,32 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 			continue;
 		} else {
 			hole = 1;
-			memset(addr, 0, UBIFS_BLOCK_SIZE);
+			folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
 		}
 		if (++i >= UBIFS_BLOCKS_PER_PAGE)
 			break;
-		addr += UBIFS_BLOCK_SIZE;
+		offset += UBIFS_BLOCK_SIZE;
 		page_block += 1;
 	}

-	if (end_index == page->index) {
+	if (end_index == folio->index) {
 		int len = i_size & (PAGE_SIZE - 1);

 		if (len && len < read)
-			memset(zaddr + len, 0, read - len);
+			folio_zero_range(folio, len, read - len);
 	}

 out_hole:
 	if (hole) {
-		SetPageChecked(page);
+		folio_set_checked(folio);
 		dbg_gen("hole");
 	}

-	SetPageUptodate(page);
-	ClearPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
+	folio_mark_uptodate(folio);
 	*n = nn;
 	return 0;

 out_err:
-	ClearPageUptodate(page);
-	SetPageError(page);
-	flush_dcache_page(page);
-	kunmap(page);
 	ubifs_err(c, "bad data node (block %u, inode %lu)",
 		  page_block, inode->i_ino);
 	return -EINVAL;
@@ -722,15 +689,15 @@ out_err:
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
  * @bu: bulk-read information
- * @page1: first page to read
+ * @folio1: first folio to read
  *
- * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
+ * Returns: %1 if the bulk-read is done, otherwise %0 is returned.
  */
 static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
-			      struct page *page1)
+			      struct folio *folio1)
 {
-	pgoff_t offset = page1->index, end_index;
-	struct address_space *mapping = page1->mapping;
+	pgoff_t offset = folio1->index, end_index;
+	struct address_space *mapping = folio1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	int err, page_idx, page_cnt, ret = 0, n = 0;
@@ -780,11 +747,11 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
 		goto out_warn;
 	}

-	err = populate_page(c, page1, bu, &n);
+	err = populate_page(c, folio1, bu, &n);
 	if (err)
 		goto out_warn;

-	unlock_page(page1);
+	folio_unlock(folio1);
 	ret = 1;

 	isize = i_size_read(inode);
@@ -794,17 +761,19 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,

 	for (page_idx = 1; page_idx < page_cnt; page_idx++) {
 		pgoff_t page_offset = offset + page_idx;
-		struct page *page;
+		struct folio *folio;

 		if (page_offset > end_index)
 			break;
-		page = find_or_create_page(mapping, page_offset, ra_gfp_mask);
-		if (!page)
+		folio = __filemap_get_folio(mapping, page_offset,
+				FGP_LOCK|FGP_ACCESSED|FGP_CREAT|FGP_NOWAIT,
+				ra_gfp_mask);
+		if (IS_ERR(folio))
 			break;
-		if (!PageUptodate(page))
-			err = populate_page(c, page, bu, &n);
-		unlock_page(page);
-		put_page(page);
+		if (!folio_test_uptodate(folio))
+			err = populate_page(c, folio, bu, &n);
+		folio_unlock(folio);
+		folio_put(folio);
 		if (err)
 			break;
 	}
@@ -827,19 +796,21 @@ out_bu_off:
 /**
  * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
- * @page: page from which to start bulk-read.
+ * @folio: folio from which to start bulk-read.
  *
  * Some flash media are capable of reading sequentially at faster rates. UBIFS
  * bulk-read facility is designed to take advantage of that, by reading in one
  * go consecutive data nodes that are also located consecutively in the same
- * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
+ * LEB.
+ *
+ * Returns: %1 if a bulk-read is done and %0 otherwise.
  */
-static int ubifs_bulk_read(struct page *page)
+static int ubifs_bulk_read(struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	pgoff_t index = page->index, last_page_read = ui->last_page_read;
+	pgoff_t index = folio->index, last_page_read = ui->last_page_read;
 	struct bu_info *bu;
 	int err = 0, allocated = 0;
@@ -887,8 +858,8 @@ static int ubifs_bulk_read(struct page *page)
 	bu->buf_len = c->max_bu_buf_len;
 	data_key_init(c, &bu->key, inode->i_ino,
-		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-	err = ubifs_do_bulk_read(c, bu, page);
+		      folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+	err = ubifs_do_bulk_read(c, bu, folio);

 	if (!allocated)
 		mutex_unlock(&c->bu_mutex);
@@ -900,69 +871,65 @@ out_unlock:
 	return err;
 }

-static int ubifs_readpage(struct file *file, struct page *page)
+static int ubifs_read_folio(struct file *file, struct folio *folio)
 {
-	if (ubifs_bulk_read(page))
+	if (ubifs_bulk_read(folio))
 		return 0;
-	do_readpage(page);
-	unlock_page(page);
+	do_readpage(folio);
+	folio_unlock(folio);
 	return 0;
 }

-static int do_writepage(struct page *page, int len)
+static int do_writepage(struct folio *folio, size_t len)
 {
-	int err = 0, i, blen;
+	int err = 0, blen;
 	unsigned int block;
-	void *addr;
+	size_t offset = 0;
 	union ubifs_key key;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;

 #ifdef UBIFS_DEBUG
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	spin_lock(&ui->ui_lock);
-	ubifs_assert(c, page->index <= ui->synced_i_size >> PAGE_SHIFT);
+	ubifs_assert(c, folio->index <= ui->synced_i_size >> PAGE_SHIFT);
 	spin_unlock(&ui->ui_lock);
 #endif

-	/* Update radix tree tags */
-	set_page_writeback(page);
+	folio_start_writeback(folio);

-	addr = kmap(page);
-	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
-	i = 0;
-	while (len) {
-		blen = min_t(int, len, UBIFS_BLOCK_SIZE);
+	block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
+	for (;;) {
+		blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
 		data_key_init(c, &key, inode->i_ino, block);
-		err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
+		err = ubifs_jnl_write_data(c, inode, &key, folio, offset, blen);
 		if (err)
 			break;
-		if (++i >= UBIFS_BLOCKS_PER_PAGE)
+		len -= blen;
+		if (!len)
 			break;
 		block += 1;
-		addr += blen;
-		len -= blen;
+		offset += blen;
 	}
+
 	if (err) {
-		SetPageError(page);
-		ubifs_err(c, "cannot write page %lu of inode %lu, error %d",
-			  page->index, inode->i_ino, err);
+		mapping_set_error(folio->mapping, err);
+		ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
+			  folio->index, inode->i_ino, err);
 		ubifs_ro_mode(c, err);
 	}

-	ubifs_assert(c, PagePrivate(page));
-	if (PageChecked(page))
+	ubifs_assert(c, folio->private != NULL);
+	if (folio_test_checked(folio))
 		release_new_page_budget(c);
 	else
 		release_existing_page_budget(c);

 	atomic_long_dec(&c->dirty_pg_cnt);
-	ClearPagePrivate(page);
-	ClearPageChecked(page);
+	folio_detach_private(folio);
+	folio_clear_checked(folio);

-	kunmap(page);
-	unlock_page(page);
-	end_page_writeback(page);
+	folio_unlock(folio);
+	folio_end_writeback(folio);
 	return err;
 }
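The rewritten do_writepage() loop above terminates on the remaining byte count rather than on a fixed per-page block count, which is what lets it handle folios larger than one page. A standalone model of that loop arithmetic (userspace C, not part of the patch; a 4096-byte UBIFS block and hypothetical input values are assumed):

#include <stdio.h>
#include <stddef.h>

#define UBIFS_BLOCK_SIZE 4096	/* assumed block size */

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

int main(void)
{
	size_t len = 10000;		/* bytes of the folio to be written */
	size_t offset = 0;		/* offset within the folio */
	unsigned int block = 40;	/* hypothetical first data block */

	/* Mirrors the for (;;) loop in do_writepage(): full blocks, then
	 * one short tail block, terminating when len reaches zero. */
	for (;;) {
		size_t blen = min_size(len, UBIFS_BLOCK_SIZE);

		printf("write block %u: %zu bytes from folio offset %zu\n",
		       block, blen, offset);
		len -= blen;
		if (!len)
			break;
		block += 1;
		offset += blen;
	}
	return 0;
}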
@@ -1012,22 +979,20 @@ static int do_writepage(struct page *page, int len)
  * on the page lock and it would not write the truncated inode node to the
  * journal before we have finished.
  */
-static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
+static int ubifs_writepage(struct folio *folio, struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	loff_t i_size = i_size_read(inode), synced_i_size;
-	pgoff_t end_index = i_size >> PAGE_SHIFT;
-	int err, len = i_size & (PAGE_SIZE - 1);
-	void *kaddr;
+	int err, len = folio_size(folio);

 	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
-		inode->i_ino, page->index, page->flags);
-	ubifs_assert(c, PagePrivate(page));
+		inode->i_ino, folio->index, folio->flags.f);
+	ubifs_assert(c, folio->private != NULL);

-	/* Is the page fully outside @i_size? (truncate in progress) */
-	if (page->index > end_index || (page->index == end_index && !len)) {
+	/* Is the folio fully outside @i_size? (truncate in progress) */
+	if (folio_pos(folio) >= i_size) {
 		err = 0;
 		goto out_unlock;
 	}
@@ -1036,12 +1001,12 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	synced_i_size = ui->synced_i_size;
 	spin_unlock(&ui->ui_lock);

-	/* Is the page fully inside @i_size? */
-	if (page->index < end_index) {
-		if (page->index >= synced_i_size >> PAGE_SHIFT) {
+	/* Is the folio fully inside i_size? */
+	if (folio_pos(folio) + len <= i_size) {
+		if (folio_pos(folio) + len > synced_i_size) {
 			err = inode->i_sb->s_op->write_inode(inode, NULL);
 			if (err)
-				goto out_unlock;
+				goto out_redirty;
 			/*
 			 * The inode has been written, but the write-buffer has
 			 * not been synchronized, so in case of an unclean
@@ -1051,34 +1016,49 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 			 * with this.
 			 */
 		}
-		return do_writepage(page, PAGE_SIZE);
+		return do_writepage(folio, len);
 	}

 	/*
-	 * The page straddles @i_size. It must be zeroed out on each and every
+	 * The folio straddles @i_size. It must be zeroed out on each and every
 	 * writepage invocation because it may be mmapped. "A file is mapped
 	 * in multiples of the page size. For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page);
-	memset(kaddr + len, 0, PAGE_SIZE - len);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr);
+	len = i_size - folio_pos(folio);
+	folio_zero_segment(folio, len, folio_size(folio));

 	if (i_size > synced_i_size) {
 		err = inode->i_sb->s_op->write_inode(inode, NULL);
 		if (err)
-			goto out_unlock;
+			goto out_redirty;
 	}

-	return do_writepage(page, len);
-
+	return do_writepage(folio, len);
+out_redirty:
+	/*
+	 * folio_redirty_for_writepage() won't call ubifs_dirty_inode() because
+	 * it passes I_DIRTY_PAGES flag while calling __mark_inode_dirty(), so
+	 * there is no need to do space budget for dirty inode.
+	 */
+	folio_redirty_for_writepage(wbc, folio);
 out_unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	return err;
 }

+static int ubifs_writepages(struct address_space *mapping,
+			    struct writeback_control *wbc)
+{
+	struct folio *folio = NULL;
+	int error;
+
+	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
+		error = ubifs_writepage(folio, wbc);
+	return error;
+}
+
 /**
  * do_attr_changes - change inode attributes.
  * @inode: inode to change attributes for
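For the folio-straddles-i_size case handled above, only the bytes below i_size are written and the tail is zeroed on every writeback, because the folio may be mmapped. A standalone check of that length computation (userspace C, not part of the patch; all values hypothetical):

#include <stdio.h>

int main(void)
{
	long long i_size = 10000;	/* hypothetical file size */
	long long folio_pos = 8192;	/* byte offset of the folio in the file */
	long long folio_size = 4096;	/* single-page folio */

	if (folio_pos >= i_size) {
		puts("folio fully outside i_size: skip (truncate in progress)");
	} else if (folio_pos + folio_size <= i_size) {
		printf("folio fully inside: write all %lld bytes\n", folio_size);
	} else {
		/* Straddles i_size: write the valid prefix, zero the tail,
		 * exactly as ubifs_writepage() now computes len. */
		long long len = i_size - folio_pos;
		printf("write %lld bytes, zero bytes %lld..%lld\n",
		       len, len, folio_size - 1);
	}
	return 0;
}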
@@ -1091,14 +1071,11 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
 	if (attr->ia_valid & ATTR_GID)
 		inode->i_gid = attr->ia_gid;
 	if (attr->ia_valid & ATTR_ATIME)
-		inode->i_atime = timespec64_trunc(attr->ia_atime,
-						  inode->i_sb->s_time_gran);
+		inode_set_atime_to_ts(inode, attr->ia_atime);
 	if (attr->ia_valid & ATTR_MTIME)
-		inode->i_mtime = timespec64_trunc(attr->ia_mtime,
-						  inode->i_sb->s_time_gran);
+		inode_set_mtime_to_ts(inode, attr->ia_mtime);
 	if (attr->ia_valid & ATTR_CTIME)
-		inode->i_ctime = timespec64_trunc(attr->ia_ctime,
-						  inode->i_sb->s_time_gran);
+		inode_set_ctime_to_ts(inode, attr->ia_ctime);
 	if (attr->ia_valid & ATTR_MODE) {
 		umode_t mode = attr->ia_mode;

@@ -1115,7 +1092,9 @@ static void do_attr_changes(struct inode *inode, const struct iattr *attr)
  * @attr: inode attribute changes description
  *
  * This function implements VFS '->setattr()' call when the inode is truncated
- * to a smaller size. Returns zero in case of success and a negative error code
+ * to a smaller size.
+ *
+ * Returns: %0 in case of success and a negative error code
  * in case of failure.
  */
 static int do_truncation(struct ubifs_info *c, struct inode *inode,
@@ -1156,11 +1135,11 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,

 	if (offset) {
 		pgoff_t index = new_size >> PAGE_SHIFT;
-		struct page *page;
+		struct folio *folio;

-		page = find_lock_page(inode->i_mapping, index);
-		if (page) {
-			if (PageDirty(page)) {
+		folio = filemap_lock_folio(inode->i_mapping, index);
+		if (!IS_ERR(folio)) {
+			if (folio_test_dirty(folio)) {
 				/*
 				 * 'ubifs_jnl_truncate()' will try to truncate
 				 * the last data node, but it contains
@@ -1169,14 +1148,14 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 				 * 'ubifs_jnl_truncate()' will see an already
 				 * truncated (and up to date) data node.
 				 */
-				ubifs_assert(c, PagePrivate(page));
+				ubifs_assert(c, folio->private != NULL);

-				clear_page_dirty_for_io(page);
+				folio_clear_dirty_for_io(folio);
 				if (UBIFS_BLOCKS_PER_PAGE_SHIFT)
-					offset = new_size &
-						 (PAGE_SIZE - 1);
-				err = do_writepage(page, offset);
-				put_page(page);
+					offset = offset_in_folio(folio,
+							new_size);
+				err = do_writepage(folio, offset);
+				folio_put(folio);
 				if (err)
 					goto out_budg;
 				/*
@@ -1189,8 +1168,8 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 				 * to 'ubifs_jnl_truncate()' to save it from
 				 * having to read it.
 				 */
-				unlock_page(page);
-				put_page(page);
+				folio_unlock(folio);
+				folio_put(folio);
 			}
 		}
 	}
@@ -1198,7 +1177,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 	mutex_lock(&ui->ui_mutex);
 	ui->ui_size = inode->i_size;
 	/* Truncation changes inode [mc]time */
-	inode->i_mtime = inode->i_ctime = current_time(inode);
+	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 	/* Other attributes may be changed at the same time as well */
 	do_attr_changes(inode, attr);
 	err = ubifs_jnl_truncate(c, inode, old_size, new_size);
@@ -1221,7 +1200,9 @@ out_budg:
  * @attr: inode attribute changes description
  *
  * This function implements VFS '->setattr()' call for all cases except
- * truncations to smaller size. Returns zero in case of success and a negative
+ * truncations to smaller size.
+ *
+ * Returns: %0 in case of success and a negative
 * error code in case of failure.
  */
 static int do_setattr(struct ubifs_info *c, struct inode *inode,
@@ -1245,7 +1226,7 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
 	mutex_lock(&ui->ui_mutex);
 	if (attr->ia_valid & ATTR_SIZE) {
 		/* Truncation changes inode [mc]time */
-		inode->i_mtime = inode->i_ctime = current_time(inode);
+		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 		/* 'truncate_setsize()' changed @i_size, update @ui_size */
 		ui->ui_size = inode->i_size;
 	}
@@ -1270,7 +1251,8 @@ static int do_setattr(struct ubifs_info *c, struct inode *inode,
 	return err;
 }

-int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
+int ubifs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+		  struct iattr *attr)
 {
 	int err;
 	struct inode *inode = d_inode(dentry);
@@ -1278,7 +1260,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)

 	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
 		inode->i_ino, inode->i_mode, attr->ia_valid);
-	err = setattr_prepare(dentry, attr);
+	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
 	if (err)
 		return err;

@@ -1299,25 +1281,25 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
 	return err;
 }

-static void ubifs_invalidatepage(struct page *page, unsigned int offset,
-				 unsigned int length)
+static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
+				   size_t length)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;

-	ubifs_assert(c, PagePrivate(page));
-	if (offset || length < PAGE_SIZE)
-		/* Partial page remains dirty */
+	ubifs_assert(c, folio_test_private(folio));
+	if (offset || length < folio_size(folio))
+		/* Partial folio remains dirty */
 		return;

-	if (PageChecked(page))
+	if (folio_test_checked(folio))
 		release_new_page_budget(c);
 	else
 		release_existing_page_budget(c);

 	atomic_long_dec(&c->dirty_pg_cnt);
-	ClearPagePrivate(page);
-	ClearPageChecked(page);
+	folio_detach_private(folio);
+	folio_clear_checked(folio);
 }

 int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
@@ -1341,7 +1323,7 @@ int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 	inode_lock(inode);

 	/* Synchronize the inode unless this is a 'datasync()' call. */
-	if (!datasync || (inode->i_state & I_DIRTY_DATASYNC)) {
+	if (!datasync || (inode_state_read_once(inode) & I_DIRTY_DATASYNC)) {
 		err = inode->i_sb->s_op->write_inode(inode, NULL);
 		if (err)
 			goto out;
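Several hunks in this region replace direct stores to i_mtime/i_ctime with accessor helpers. The combined expression used throughout the patch relies on inode_set_ctime_current() returning the timestamp it stored, so the same value can be forwarded into mtime. A reduced kernel-style sketch (illustrative, not part of the patch; the function name is made up):

/* Sketch: the timestamp idiom used by the patch. inode_set_ctime_current()
 * stores the granularity-truncated current time in ctime and returns it,
 * so one expression updates both fields consistently. */
static void touch_mc_times(struct inode *inode)
{
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
}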
@@ -1365,63 +1347,65 @@ out:
  * This helper function checks if the inode mtime/ctime should be updated or
  * not. If current values of the time-stamps are within the UBIFS inode time
  * granularity, they are not updated. This is an optimization.
+ *
+ * Returns: %1 if time update is needed, %0 if not
  */
 static inline int mctime_update_needed(const struct inode *inode,
 				       const struct timespec64 *now)
 {
-	if (!timespec64_equal(&inode->i_mtime, now) ||
-	    !timespec64_equal(&inode->i_ctime, now))
+	struct timespec64 ctime = inode_get_ctime(inode);
+	struct timespec64 mtime = inode_get_mtime(inode);
+
+	if (!timespec64_equal(&mtime, now) || !timespec64_equal(&ctime, now))
 		return 1;
+
 	return 0;
 }

-#ifdef CONFIG_UBIFS_ATIME_SUPPORT
 /**
  * ubifs_update_time - update time of inode.
  * @inode: inode to update
+ * @flags: time updating control flag determines updating
+ *	    which time fields of @inode
  *
  * This function updates time of the inode.
+ *
+ * Returns: %0 for success or a negative error code otherwise.
  */
-int ubifs_update_time(struct inode *inode, struct timespec64 *time,
-		      int flags)
+int ubifs_update_time(struct inode *inode, int flags)
 {
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_budget_req req = { .dirtied_ino = 1,
 			.dirtied_ino_d = ALIGN(ui->data_len, 8) };
-	int iflags = I_DIRTY_TIME;
 	int err, release;

+	if (!IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) {
+		generic_update_time(inode, flags);
+		return 0;
+	}
+
 	err = ubifs_budget_space(c, &req);
 	if (err)
 		return err;

 	mutex_lock(&ui->ui_mutex);
-	if (flags & S_ATIME)
-		inode->i_atime = *time;
-	if (flags & S_CTIME)
-		inode->i_ctime = *time;
-	if (flags & S_MTIME)
-		inode->i_mtime = *time;
-
-	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
-		iflags |= I_DIRTY_SYNC;
-
+	inode_update_timestamps(inode, flags);
 	release = ui->dirty;
-	__mark_inode_dirty(inode, iflags);
+	__mark_inode_dirty(inode, I_DIRTY_SYNC);
 	mutex_unlock(&ui->ui_mutex);
 	if (release)
 		ubifs_release_budget(c, &req);
 	return 0;
 }
-#endif

 /**
  * update_mctime - update mtime and ctime of an inode.
  * @inode: inode to update
  *
  * This function updates mtime and ctime of the inode if it is not equivalent to
- * current time. Returns zero in case of success and a negative error code in
+ * current time.
+ *
+ * Returns: %0 in case of success and a negative error code in
  * case of failure.
  */
 static int update_mctime(struct inode *inode)
@@ -1440,7 +1424,7 @@ static int update_mctime(struct inode *inode)
 		return err;

 	mutex_lock(&ui->ui_mutex);
-	inode->i_mtime = inode->i_ctime = current_time(inode);
+	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 	release = ui->dirty;
 	mark_inode_dirty_sync(inode);
 	mutex_unlock(&ui->ui_mutex);
@@ -1460,60 +1444,46 @@ static ssize_t ubifs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return generic_file_write_iter(iocb, from);
 }

-static int ubifs_set_page_dirty(struct page *page)
+static bool ubifs_dirty_folio(struct address_space *mapping,
+			      struct folio *folio)
 {
-	int ret;
-	struct inode *inode = page->mapping->host;
-	struct ubifs_info *c = inode->i_sb->s_fs_info;
+	bool ret;
+	struct ubifs_info *c = mapping->host->i_sb->s_fs_info;

-	ret = __set_page_dirty_nobuffers(page);
+	ret = filemap_dirty_folio(mapping, folio);
 	/*
 	 * An attempt to dirty a page without budgeting for it - should not
 	 * happen.
 	 */
-	ubifs_assert(c, ret == 0);
+	ubifs_assert(c, ret == false);
 	return ret;
 }

-#ifdef CONFIG_MIGRATION
-static int ubifs_migrate_page(struct address_space *mapping,
-		struct page *newpage, struct page *page, enum migrate_mode mode)
+static bool ubifs_release_folio(struct folio *folio, gfp_t unused_gfp_flags)
 {
-	int rc;
-
-	rc = migrate_page_move_mapping(mapping, newpage, page, mode, 0);
-	if (rc != MIGRATEPAGE_SUCCESS)
-		return rc;
-
-	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
-		SetPagePrivate(newpage);
-	}
-
-	if (mode != MIGRATE_SYNC_NO_COPY)
-		migrate_page_copy(newpage, page);
-	else
-		migrate_page_states(newpage, page);
-	return MIGRATEPAGE_SUCCESS;
-}
-#endif
-
-static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
-{
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ubifs_info *c = inode->i_sb->s_fs_info;

+	if (folio_test_writeback(folio))
+		return false;
+
 	/*
-	 * An attempt to release a dirty page without budgeting for it - should
-	 * not happen.
+	 * Page is private but not dirty, weird? There is one condition
+	 * making it happened. ubifs_writepage skipped the page because
+	 * page index beyonds isize (for example. truncated by other
+	 * process named A), then the page is invalidated by fadvise64
+	 * syscall before being truncated by process A.
 	 */
-	if (PageWriteback(page))
-		return 0;
-	ubifs_assert(c, PagePrivate(page));
-	ubifs_assert(c, 0);
-	ClearPagePrivate(page);
-	ClearPageChecked(page);
-	return 1;
+	ubifs_assert(c, folio_test_private(folio));
+	if (folio_test_checked(folio))
+		release_new_page_budget(c);
+	else
+		release_existing_page_budget(c);
+
+	atomic_long_dec(&c->dirty_pg_cnt);
+	folio_detach_private(folio);
+	folio_clear_checked(folio);
+	return true;
 }

 /*
@@ -1522,14 +1492,14 @@
  */
 static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct timespec64 now = current_time(inode);
 	struct ubifs_budget_req req = { .new_page = 1 };
 	int err, update_time;

-	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, page->index,
+	dbg_gen("ino %lu, pg %lu, i_size %lld", inode->i_ino, folio->index,
 		i_size_read(inode));
 	ubifs_assert(c, !c->ro_media && !c->ro_mount);

@@ -1537,17 +1507,17 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS; /* -EROFS */

 	/*
-	 * We have not locked @page so far so we may budget for changing the
-	 * page. Note, we cannot do this after we locked the page, because
+	 * We have not locked @folio so far so we may budget for changing the
+	 * folio. Note, we cannot do this after we locked the folio, because
 	 * budgeting may cause write-back which would cause deadlock.
 	 *
-	 * At the moment we do not know whether the page is dirty or not, so we
-	 * assume that it is not and budget for a new page. We could look at
+	 * At the moment we do not know whether the folio is dirty or not, so we
+	 * assume that it is not and budget for a new folio. We could look at
 	 * the @PG_private flag and figure this out, but we may race with write
-	 * back and the page state may change by the time we lock it, so this
+	 * back and the folio state may change by the time we lock it, so this
 	 * would need additional care. We do not bother with this at the
 	 * moment, although it might be good idea to do. Instead, we allocate
-	 * budget for a new page and amend it later on if the page was in fact
+	 * budget for a new folio and amend it later on if the folio was in fact
* * The budgeting-related logic of this function is similar to what we @@ -1570,21 +1540,21 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - lock_page(page); - if (unlikely(page->mapping != inode->i_mapping || - page_offset(page) > i_size_read(inode))) { - /* Page got truncated out from underneath us */ + folio_lock(folio); + if (unlikely(folio->mapping != inode->i_mapping || + folio_pos(folio) >= i_size_read(inode))) { + /* Folio got truncated out from underneath us */ goto sigbus; } - if (PagePrivate(page)) + if (folio->private) release_new_page_budget(c); else { - if (!PageChecked(page)) + if (!folio_test_checked(folio)) ubifs_convert_page_budget(c); - SetPagePrivate(page); + folio_attach_private(folio, (void *)1); atomic_long_inc(&c->dirty_pg_cnt); - __set_page_dirty_nobuffers(page); + filemap_dirty_folio(folio->mapping, folio); } if (update_time) { @@ -1592,7 +1562,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf) struct ubifs_inode *ui = ubifs_inode(inode); mutex_lock(&ui->ui_mutex); - inode->i_mtime = inode->i_ctime = current_time(inode); + inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); release = ui->dirty; mark_inode_dirty_sync(inode); mutex_unlock(&ui->ui_mutex); @@ -1600,11 +1570,11 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf) ubifs_release_dirty_inode_budget(c, ui); } - wait_for_stable_page(page); + folio_wait_stable(folio); return VM_FAULT_LOCKED; sigbus: - unlock_page(page); + folio_unlock(folio); ubifs_release_budget(c, &req); return VM_FAULT_SIGBUS; } @@ -1615,17 +1585,18 @@ static const struct vm_operations_struct ubifs_file_vm_ops = { .page_mkwrite = ubifs_vm_page_mkwrite, }; -static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma) +static int ubifs_file_mmap_prepare(struct vm_area_desc *desc) { int err; - err = generic_file_mmap(file, vma); + err = generic_file_mmap_prepare(desc); if (err) return err; - vma->vm_ops = &ubifs_file_vm_ops; -#ifdef CONFIG_UBIFS_ATIME_SUPPORT - file_accessed(file); -#endif + desc->vm_ops = &ubifs_file_vm_ops; + + if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) + file_accessed(desc->file); + return 0; } @@ -1644,50 +1615,53 @@ static const char *ubifs_get_link(struct dentry *dentry, return fscrypt_get_symlink(inode, ui->data, ui->data_len, done); } +static int ubifs_symlink_getattr(struct mnt_idmap *idmap, + const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + ubifs_getattr(idmap, path, stat, request_mask, query_flags); + + if (IS_ENCRYPTED(d_inode(path->dentry))) + return fscrypt_symlink_getattr(path, stat); + return 0; +} + const struct address_space_operations ubifs_file_address_operations = { - .readpage = ubifs_readpage, - .writepage = ubifs_writepage, + .read_folio = ubifs_read_folio, + .writepages = ubifs_writepages, .write_begin = ubifs_write_begin, .write_end = ubifs_write_end, - .invalidatepage = ubifs_invalidatepage, - .set_page_dirty = ubifs_set_page_dirty, -#ifdef CONFIG_MIGRATION - .migratepage = ubifs_migrate_page, -#endif - .releasepage = ubifs_releasepage, + .invalidate_folio = ubifs_invalidate_folio, + .dirty_folio = ubifs_dirty_folio, + .migrate_folio = filemap_migrate_folio, + .release_folio = ubifs_release_folio, }; const struct inode_operations ubifs_file_inode_operations = { .setattr = ubifs_setattr, .getattr = ubifs_getattr, -#ifdef CONFIG_UBIFS_FS_XATTR .listxattr = ubifs_listxattr, -#endif -#ifdef CONFIG_UBIFS_ATIME_SUPPORT .update_time = ubifs_update_time, -#endif + 
@@ -1644,50 +1615,53 @@ static const char *ubifs_get_link(struct dentry *dentry,
 	return fscrypt_get_symlink(inode, ui->data, ui->data_len, done);
 }

+static int ubifs_symlink_getattr(struct mnt_idmap *idmap,
+				 const struct path *path, struct kstat *stat,
+				 u32 request_mask, unsigned int query_flags)
+{
+	ubifs_getattr(idmap, path, stat, request_mask, query_flags);
+
+	if (IS_ENCRYPTED(d_inode(path->dentry)))
+		return fscrypt_symlink_getattr(path, stat);
+	return 0;
+}
+
 const struct address_space_operations ubifs_file_address_operations = {
-	.readpage       = ubifs_readpage,
-	.writepage      = ubifs_writepage,
+	.read_folio     = ubifs_read_folio,
+	.writepages     = ubifs_writepages,
 	.write_begin    = ubifs_write_begin,
 	.write_end      = ubifs_write_end,
-	.invalidatepage = ubifs_invalidatepage,
-	.set_page_dirty = ubifs_set_page_dirty,
-#ifdef CONFIG_MIGRATION
-	.migratepage    = ubifs_migrate_page,
-#endif
-	.releasepage    = ubifs_releasepage,
+	.invalidate_folio = ubifs_invalidate_folio,
+	.dirty_folio	= ubifs_dirty_folio,
+	.migrate_folio	= filemap_migrate_folio,
+	.release_folio	= ubifs_release_folio,
 };

 const struct inode_operations ubifs_file_inode_operations = {
 	.setattr     = ubifs_setattr,
 	.getattr     = ubifs_getattr,
-#ifdef CONFIG_UBIFS_FS_XATTR
 	.listxattr   = ubifs_listxattr,
-#endif
-#ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
-#endif
+	.fileattr_get = ubifs_fileattr_get,
+	.fileattr_set = ubifs_fileattr_set,
 };

 const struct inode_operations ubifs_symlink_inode_operations = {
 	.get_link    = ubifs_get_link,
 	.setattr     = ubifs_setattr,
-	.getattr     = ubifs_getattr,
-#ifdef CONFIG_UBIFS_FS_XATTR
+	.getattr     = ubifs_symlink_getattr,
 	.listxattr   = ubifs_listxattr,
-#endif
-#ifdef CONFIG_UBIFS_ATIME_SUPPORT
 	.update_time = ubifs_update_time,
-#endif
 };

 const struct file_operations ubifs_file_operations = {
 	.llseek         = generic_file_llseek,
 	.read_iter      = generic_file_read_iter,
 	.write_iter     = ubifs_write_iter,
-	.mmap           = ubifs_file_mmap,
+	.mmap_prepare   = ubifs_file_mmap_prepare,
 	.fsync          = ubifs_fsync,
 	.unlocked_ioctl = ubifs_ioctl,
-	.splice_read    = generic_file_splice_read,
+	.splice_read    = filemap_splice_read,
 	.splice_write   = iter_file_splice_write,
 	.open		= fscrypt_file_open,
 #ifdef CONFIG_COMPAT
