author    Linus Torvalds <torvalds@linux-foundation.org>  2017-09-12 20:05:58 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-09-12 20:05:58 -0700
commit    6d8ef53e8b2fed8b0f91df0c6da7cc92747d934a (patch)
tree      fabe39021985b631ea5d457126e9ed3d53e910f0 /fs/f2fs/data.c
parent    cdb897e3279ad1677138d6bdf1cfaf1393718a08 (diff)
parent    e6c6de18f010d9a7d592f4044d2c30213cb3a7bc (diff)
Merge tag 'f2fs-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've mostly tuned f2fs to provide better user
  experience for Android. Especially, we've worked on atomic write
  feature again with SQLite community in order to support it
  officially. And we added or modified several facilities to analyze
  and enhance IO behaviors.

  Major changes include:

   - add app/fs io stat
   - add inode checksum feature
   - support project/journalled quota
   - enhance atomic write with new ioctl() which exposes feature set
   - enhance background gc/discard/fstrim flows with new gc_urgent mode
   - add F2FS_IOC_FS{GET,SET}XATTR
   - fix some quota flows"

* tag 'f2fs-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (63 commits)
  f2fs: hurry up to issue discard after io interruption
  f2fs: fix to show correct discard_granularity in sysfs
  f2fs: detect dirty inode in evict_inode
  f2fs: clear radix tree dirty tag of pages whose dirty flag is cleared
  f2fs: speed up gc_urgent mode with SSR
  f2fs: better to wait for fstrim completion
  f2fs: avoid race in between read xattr & write xattr
  f2fs: make get_lock_data_page to handle encrypted inode
  f2fs: use generic terms used for encrypted block management
  f2fs: introduce f2fs_encrypted_file for clean-up
  Revert "f2fs: add a new function get_ssr_cost"
  f2fs: constify super_operations
  f2fs: fix to wake up all sleeping flusher
  f2fs: avoid race in between atomic_read & atomic_inc
  f2fs: remove unneeded parameter of change_curseg
  f2fs: update i_flags correctly
  f2fs: don't check inode's checksum if it was dirtied or writebacked
  f2fs: don't need to update inode checksum for recovery
  f2fs: trigger fdatasync for non-atomic_write file
  f2fs: fix to avoid race in between aio and gc
  ...
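Of the items above, the new gc_urgent mode is the one tuned directly from userspace: it is toggled through sysfs rather than an ioctl(). As a minimal sketch (the knob path /sys/fs/f2fs/<device>/gc_urgent is an assumption based on the f2fs sysfs layout, not something shown in this data.c diff), enabling it from C could look like:

/*
 * Sketch only: enable f2fs urgent background GC for one device.
 * Assumes the sysfs knob /sys/fs/f2fs/<device>/gc_urgent exists;
 * the path is not part of this commit's diff.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_gc_urgent(const char *dev, int enable)
{
	char path[256];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "/sys/fs/f2fs/%s/gc_urgent", dev);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		perror("write");
		ret = -1;
	}
	close(fd);
	return ret;
}

int main(void)
{
	/* e.g. a partition mounted from /dev/sda1 */
	return set_gc_urgent("sda1", 1) ? 1 : 0;
}

This is the C equivalent of echo 1 > /sys/fs/f2fs/sda1/gc_urgent from a shell; background GC then runs much more aggressively until the knob is written back to 0.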
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c  177
1 file changed, 96 insertions(+), 81 deletions(-)
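Within data.c itself, a recurring pattern in the hunks below is replacing the open-coded test f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode) with f2fs_encrypted_file(inode), introduced by the clean-up commit listed above. The helper is defined in f2fs.h, not in this diff; presumably it simply folds the two checks into one inline, along these lines:

/* Sketch of what f2fs_encrypted_file() is assumed to wrap; the actual
 * definition lives in f2fs.h and is not part of this data.c diff. */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode);
}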
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index fb96bb71da00..36b535207c88 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -457,14 +457,65 @@ out_fail:
return err;
}
+static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct bio *bio;
+
+ if (f2fs_encrypted_file(inode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait the page to be moved by cleaning */
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ f2fs_target_device(sbi, blkaddr, bio);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ return bio;
+}
+
+/* This can handle encryption stuffs */
+static int f2fs_submit_page_read(struct inode *inode, struct page *page,
+ block_t blkaddr)
+{
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ return -EFAULT;
+ }
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ return 0;
+}
+
static void __set_data_blkaddr(struct dnode_of_data *dn)
{
struct f2fs_node *rn = F2FS_NODE(dn->node_page);
__le32 *addr_array;
+ int base = 0;
+
+ if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
+ base = get_extra_isize(dn->inode);
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
- addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
@@ -508,8 +559,8 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
f2fs_wait_on_page_writeback(dn->node_page, NODE, true);
for (; count > 0; dn->ofs_in_node++) {
- block_t blkaddr =
- datablock_addr(dn->node_page, dn->ofs_in_node);
+ block_t blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
if (blkaddr == NULL_ADDR) {
dn->data_blkaddr = NEW_ADDR;
__set_data_blkaddr(dn);
@@ -570,16 +621,6 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
struct page *page;
struct extent_info ei = {0,0,0};
int err;
- struct f2fs_io_info fio = {
- .sbi = F2FS_I_SB(inode),
- .type = DATA,
- .op = REQ_OP_READ,
- .op_flags = op_flags,
- .encrypted_page = NULL,
- };
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- return read_mapping_page(mapping, index, NULL);
page = f2fs_grab_cache_page(mapping, index, for_write);
if (!page)
@@ -620,9 +661,7 @@ got_it:
return page;
}
- fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
- fio.page = page;
- err = f2fs_submit_page_bio(&fio);
+ err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
if (err)
goto put_err;
return page;
@@ -756,7 +795,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
+ dn->data_blkaddr = datablock_addr(dn->inode,
+ dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
goto alloc;
@@ -782,7 +822,7 @@ alloc:
static inline bool __force_buffered_io(struct inode *inode, int rw)
{
- return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
+ return (f2fs_encrypted_file(inode) ||
(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
F2FS_I_SB(inode)->s_ndevs);
}
@@ -814,7 +854,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
F2FS_GET_BLOCK_PRE_AIO :
F2FS_GET_BLOCK_PRE_DIO);
}
- if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+ if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
err = f2fs_convert_inline_inode(inode);
if (err)
return err;
@@ -903,7 +943,7 @@ next_dnode:
end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
next_block:
- blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
if (create) {
@@ -1040,7 +1080,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL);
+ F2FS_GET_BLOCK_DEFAULT, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1146,35 +1186,6 @@ out:
return ret;
}
-static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct fscrypt_ctx *ctx = NULL;
- struct bio *bio;
-
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- return ERR_CAST(ctx);
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
- }
-
- bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
- return ERR_PTR(-ENOMEM);
- }
- f2fs_target_device(sbi, blkaddr, bio);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
-
- return bio;
-}
-
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -1240,7 +1251,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
map.m_len = last_block - block_in_file;
if (f2fs_map_blocks(inode, &map, 0,
- F2FS_GET_BLOCK_READ))
+ F2FS_GET_BLOCK_DEFAULT))
goto set_error_page;
}
got_it:
@@ -1271,12 +1282,11 @@ submit_and_realloc:
bio = NULL;
}
if (bio == NULL) {
- bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
}
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1341,11 +1351,11 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
struct inode *inode = fio->page->mapping->host;
gfp_t gfp_flags = GFP_NOFS;
- if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
+ if (!f2fs_encrypted_file(inode))
return 0;
/* wait for GCed encrypted page writeback */
- f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);
+ f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);
retry_encrypt:
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
@@ -1471,7 +1481,8 @@ out:
}
static int __write_data_page(struct page *page, bool *submitted,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = page->mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -1492,6 +1503,7 @@ static int __write_data_page(struct page *page, bool *submitted,
.encrypted_page = NULL,
.submitted = false,
.need_lock = LOCK_RETRY,
+ .io_type = io_type,
};
trace_f2fs_writepage(page, DATA);
@@ -1598,7 +1610,7 @@ redirty_out:
static int f2fs_write_data_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_data_page(page, NULL, wbc);
+ return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}
/*
@@ -1607,7 +1619,8 @@ static int f2fs_write_data_page(struct page *page,
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
int ret = 0;
int done = 0;
@@ -1697,7 +1710,7 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = __write_data_page(page, &submitted, wbc);
+ ret = __write_data_page(page, &submitted, wbc, io_type);
if (unlikely(ret)) {
/*
* keep nr_to_write, since vfs uses this to
@@ -1752,8 +1765,9 @@ continue_unlock:
return ret;
}
-static int f2fs_write_data_pages(struct address_space *mapping,
- struct writeback_control *wbc)
+int __f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc,
+ enum iostat_type io_type)
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -1790,7 +1804,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
goto skip_write;
blk_start_plug(&plug);
- ret = f2fs_write_cache_pages(mapping, wbc);
+ ret = f2fs_write_cache_pages(mapping, wbc, io_type);
blk_finish_plug(&plug);
if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1809,6 +1823,16 @@ skip_write:
return 0;
}
+static int f2fs_write_data_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+
+ return __f2fs_write_data_pages(mapping, wbc,
+ F2FS_I(inode)->cp_task == current ?
+ FS_CP_DATA_IO : FS_DATA_IO);
+}
+
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
struct inode *inode = mapping->host;
@@ -1858,7 +1882,7 @@ restart:
set_new_dnode(&dn, inode, ipage, ipage, 0);
if (f2fs_has_inline_data(inode)) {
- if (pos + len <= MAX_INLINE_DATA) {
+ if (pos + len <= MAX_INLINE_DATA(inode)) {
read_inline_data(page, ipage);
set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
@@ -1956,8 +1980,8 @@ repeat:
f2fs_wait_on_page_writeback(page, DATA, false);
/* wait for GCed encrypted page writeback */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
- f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ if (f2fs_encrypted_file(inode))
+ f2fs_wait_on_block_writeback(sbi, blkaddr);
if (len == PAGE_SIZE || PageUptodate(page))
return 0;
@@ -1971,21 +1995,9 @@ repeat:
zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
} else {
- struct bio *bio;
-
- bio = f2fs_grab_bio(inode, blkaddr, 1);
- if (IS_ERR(bio)) {
- err = PTR_ERR(bio);
- goto fail;
- }
- bio->bi_opf = REQ_OP_READ;
- if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
- bio_put(bio);
- err = -EFAULT;
+ err = f2fs_submit_page_read(inode, page, blkaddr);
+ if (err)
goto fail;
- }
-
- __submit_bio(sbi, bio, DATA);
lock_page(page);
if (unlikely(page->mapping != mapping)) {
@@ -2075,10 +2087,13 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
up_read(&F2FS_I(inode)->dio_rwsem[rw]);
if (rw == WRITE) {
- if (err > 0)
+ if (err > 0) {
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
+ err);
set_inode_flag(inode, FI_UPDATE_WRITE);
- else if (err < 0)
+ } else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
+ }
}
trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
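The direct-IO hunk above is where the new app/fs io stat facility touches data.c: bytes completed by a direct write are charged to APP_DIRECT_IO through f2fs_update_iostat(), and the writeback paths are threaded with an enum iostat_type argument so checkpoint-driven writeback (FS_CP_DATA_IO) is counted separately from regular data writeback (FS_DATA_IO). f2fs_update_iostat() itself is defined outside this file; the standalone sketch below only illustrates the per-type byte-accounting pattern, and the struct, lock, and counter names in it are assumptions rather than the real f2fs ones.

/*
 * Standalone sketch of per-type I/O byte accounting, mirroring how
 * f2fs_update_iostat() is used in the hunks above. All names here
 * (io_stat, io_stat_update, NR_IO_TYPE) are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

enum iostat_type { APP_DIRECT_IO, FS_DATA_IO, FS_CP_DATA_IO, NR_IO_TYPE };

struct io_stat {
	pthread_mutex_t lock;
	unsigned long long bytes[NR_IO_TYPE];
};

static void io_stat_update(struct io_stat *st, enum iostat_type type,
			   unsigned long long delta)
{
	pthread_mutex_lock(&st->lock);
	st->bytes[type] += delta;	/* accumulate bytes for this I/O type */
	pthread_mutex_unlock(&st->lock);
}

int main(void)
{
	struct io_stat st = { .lock = PTHREAD_MUTEX_INITIALIZER };

	/* e.g. a 4 KiB direct write completed: charge it to APP_DIRECT_IO */
	io_stat_update(&st, APP_DIRECT_IO, 4096);
	printf("APP_DIRECT_IO: %llu bytes\n", st.bytes[APP_DIRECT_IO]);
	return 0;
}

Built with cc -pthread, the sketch prints the accumulated byte count for APP_DIRECT_IO after one simulated 4 KiB direct write.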