Diffstat (limited to 'fs')
665 files changed, 31546 insertions, 13463 deletions
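Most of the churn below comes from the afs directory-handling rewrite, which stops walking per-folio pagecache entries and instead keeps the directory contents in a folio_queue buffer that is scanned with iov_iter_folio_queue() and iterate_folioq() step callbacks (see afs_dir_dump_step(), afs_dir_check_step() and afs_dir_iterate_step() in fs/afs/dir.c). As a rough, illustrative sketch only — my_dump_step(), my_walk_buffer() and MY_BLOCK_SIZE are hypothetical names, not part of the patch, and the block-aligned-segment assumption mirrors the WARN_ON_ONCE checks in the patch itself — the pattern looks roughly like this:

#include <linux/folio_queue.h>
#include <linux/iov_iter.h>
#include <linux/uio.h>
#include <linux/printk.h>

#define MY_BLOCK_SIZE 2048	/* hypothetical; mirrors AFS_DIR_BLOCK_SIZE */

/* Step callback: invoked once per contiguous mapped segment of the buffer.
 * Assumes each segment is a whole number of blocks, as the afs code asserts.
 */
static size_t my_dump_step(void *iter_base, size_t progress, size_t len,
			   void *priv, void *priv2)
{
	do {
		pr_info("[%05zx] %32phN\n", progress, iter_base);
		iter_base += MY_BLOCK_SIZE;
		progress += MY_BLOCK_SIZE;
		len -= MY_BLOCK_SIZE;
	} while (len > 0);

	return len;	/* 0 => the whole segment was consumed */
}

/* Walk the whole folio_queue buffer without touching the pagecache. */
static void my_walk_buffer(struct folio_queue *buffer, size_t size)
{
	struct iov_iter iter;

	iov_iter_folio_queue(&iter, ITER_SOURCE, buffer, 0, 0, size);
	iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL, my_dump_step);
}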
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 698c43dd5dc8..f28bc763847a 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -202,7 +202,7 @@ static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode)
 	return inode->i_sb->s_fs_info;
 }
 
-static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry)
+static inline struct v9fs_session_info *v9fs_dentry2v9ses(const struct dentry *dentry)
 {
 	return dentry->d_sb->s_fs_info;
 }
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index 3bc9ce6c575e..32619d146cbc 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -81,13 +81,13 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 	if (pos + total >= i_size_read(rreq->inode))
 		__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
-
-	if (!err) {
+	if (!err && total) {
 		subreq->transferred += total;
 		__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
 	}
 
-	netfs_read_subreq_terminated(subreq, err, false);
+	subreq->error = err;
+	netfs_read_subreq_terminated(subreq);
 }
 
 /**
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 01338d4c2d9e..5061f192eafd 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -61,7 +61,7 @@ static void v9fs_dentry_release(struct dentry *dentry)
 		p9_fid_put(hlist_entry(p, struct p9_fid, dlist));
 }
 
-static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int __v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
 {
 	struct p9_fid *fid;
 	struct inode *inode;
@@ -99,14 +99,36 @@ out_valid:
 	return 1;
 }
 
+static int v9fs_lookup_revalidate(struct inode *dir, const struct qstr *name,
+				  struct dentry *dentry, unsigned int flags)
+{
+	return __v9fs_lookup_revalidate(dentry, flags);
+}
+
+static bool v9fs_dentry_unalias_trylock(const struct dentry *dentry)
+{
+	struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+	return down_write_trylock(&v9ses->rename_sem);
+}
+
+static void v9fs_dentry_unalias_unlock(const struct dentry *dentry)
+{
+	struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+	up_write(&v9ses->rename_sem);
+}
+
 const struct dentry_operations v9fs_cached_dentry_operations = {
 	.d_revalidate = v9fs_lookup_revalidate,
-	.d_weak_revalidate = v9fs_lookup_revalidate,
+	.d_weak_revalidate = __v9fs_lookup_revalidate,
 	.d_delete = v9fs_cached_dentry_delete,
 	.d_release = v9fs_dentry_release,
+	.d_unalias_trylock = v9fs_dentry_unalias_trylock,
+	.d_unalias_unlock = v9fs_dentry_unalias_unlock,
 };
 
 const struct dentry_operations v9fs_dentry_operations = {
 	.d_delete = always_delete_dentry,
 	.d_release = v9fs_dentry_release,
+	.d_unalias_trylock = v9fs_dentry_unalias_trylock,
+	.d_unalias_unlock = v9fs_dentry_unalias_unlock,
 };
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index dcdc0f1bb76f..5efd7e13b304 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -11,6 +11,7 @@ kafs-y := \
 	cmservice.o \
 	dir.o \
 	dir_edit.o \
+	dir_search.o \
 	dir_silly.o \
 	dynroot.o \
 	file.o \
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 99b2c8172021..69e1dd55b160 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -41,7 +41,7 @@ static void afs_volume_init_callback(struct afs_volume *volume)
 
 	list_for_each_entry(vnode, &volume->open_mmaps, cb_mmap_link) {
 		if (vnode->cb_v_check != atomic_read(&volume->cb_v_break)) {
-			atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE);
+			afs_clear_cb_promise(vnode, afs_cb_promise_clear_vol_init_cb);
 			queue_work(system_unbound_wq, &vnode->cb_work);
 		}
 	}
@@ -79,7 +79,7 @@ void __afs_break_callback(struct
afs_vnode *vnode, enum afs_cb_break_reason reas _enter(""); clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); - if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE) { + if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_cb_break)) { vnode->cb_break++; vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break); afs_clear_permits(vnode); diff --git a/fs/afs/cell.c b/fs/afs/cell.c index caa09875f520..cee42646736c 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -146,18 +146,20 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, return ERR_PTR(-ENOMEM); } - cell->name = kmalloc(namelen + 1, GFP_KERNEL); + cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL); if (!cell->name) { kfree(cell); return ERR_PTR(-ENOMEM); } - cell->net = net; + cell->name[0] = '.'; + cell->name++; cell->name_len = namelen; for (i = 0; i < namelen; i++) cell->name[i] = tolower(name[i]); cell->name[i] = 0; + cell->net = net; refcount_set(&cell->ref, 1); atomic_set(&cell->active, 0); INIT_WORK(&cell->manager, afs_manage_cell_work); @@ -211,7 +213,7 @@ parse_failed: if (ret == -EINVAL) printk(KERN_ERR "kAFS: bad VL server IP address\n"); error: - kfree(cell->name); + kfree(cell->name - 1); kfree(cell); _leave(" = %d", ret); return ERR_PTR(ret); @@ -365,6 +367,14 @@ int afs_cell_init(struct afs_net *net, const char *rootcell) len = cp - rootcell; } + if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.') + return -EINVAL; + if (memchr(rootcell, '/', len)) + return -EINVAL; + cp = strstr(rootcell, ".."); + if (cp && cp < rootcell + len) + return -EINVAL; + /* allocate a cell record for the root cell */ new_root = afs_lookup_cell(net, rootcell, len, vllist, false); if (IS_ERR(new_root)) { @@ -502,7 +512,7 @@ static void afs_cell_destroy(struct rcu_head *rcu) afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers)); afs_unuse_cell(net, cell->alias_of, afs_cell_trace_unuse_alias); key_put(cell->anonymous_key); - kfree(cell->name); + kfree(cell->name - 1); kfree(cell); afs_dec_cells_outstanding(net); @@ -710,7 +720,8 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell) afs_proc_cell_remove(cell); mutex_lock(&net->proc_cells_lock); - hlist_del_rcu(&cell->proc_link); + if (!hlist_unhashed(&cell->proc_link)) + hlist_del_rcu(&cell->proc_link); afs_dynroot_rmdir(net, cell); mutex_unlock(&net->proc_cells_lock); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index ada363af5aab..02cbf38e1a77 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -13,6 +13,7 @@ #include <linux/ctype.h> #include <linux/sched.h> #include <linux/iversion.h> +#include <linux/iov_iter.h> #include <linux/task_io_accounting_ops.h> #include "internal.h" #include "afs_fs.h" @@ -22,7 +23,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags); static int afs_dir_open(struct inode *inode, struct file *file); static int afs_readdir(struct file *file, struct dir_context *ctx); -static int afs_d_revalidate(struct dentry *dentry, unsigned int flags); +static int afs_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags); static int afs_d_delete(const struct dentry *dentry); static void afs_d_iput(struct dentry *dentry, struct inode *inode); static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, int nlen, @@ -42,15 +44,6 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry 
*old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags); -static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags); -static void afs_dir_invalidate_folio(struct folio *folio, size_t offset, - size_t length); - -static bool afs_dir_dirty_folio(struct address_space *mapping, - struct folio *folio) -{ - BUG(); /* This should never happen. */ -} const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, @@ -75,10 +68,7 @@ const struct inode_operations afs_dir_inode_operations = { }; const struct address_space_operations afs_dir_aops = { - .dirty_folio = afs_dir_dirty_folio, - .release_folio = afs_dir_release_folio, - .invalidate_folio = afs_dir_invalidate_folio, - .migrate_folio = filemap_migrate_folio, + .writepages = afs_single_writepages, }; const struct dentry_operations afs_fs_dentry_operations = { @@ -99,152 +89,124 @@ struct afs_lookup_one_cookie { struct afs_lookup_cookie { struct dir_context ctx; struct qstr name; - bool found; - bool one_only; unsigned short nr_fids; struct afs_fid fids[50]; }; +static void afs_dir_unuse_cookie(struct afs_vnode *dvnode, int ret) +{ + if (ret == 0) { + struct afs_vnode_cache_aux aux; + loff_t i_size = i_size_read(&dvnode->netfs.inode); + + afs_set_cache_aux(dvnode, &aux); + fscache_unuse_cookie(afs_vnode_cache(dvnode), &aux, &i_size); + } else { + fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL); + } +} + /* - * Drop the refs that we're holding on the folios we were reading into. We've - * got refs on the first nr_pages pages. + * Iterate through a kmapped directory segment, dumping a summary of + * the contents. */ -static void afs_dir_read_cleanup(struct afs_read *req) +static size_t afs_dir_dump_step(void *iter_base, size_t progress, size_t len, + void *priv, void *priv2) { - struct address_space *mapping = req->vnode->netfs.inode.i_mapping; - struct folio *folio; - pgoff_t last = req->nr_pages - 1; + do { + union afs_xdr_dir_block *block = iter_base; - XA_STATE(xas, &mapping->i_pages, 0); + pr_warn("[%05zx] %32phN\n", progress, block); + iter_base += AFS_DIR_BLOCK_SIZE; + progress += AFS_DIR_BLOCK_SIZE; + len -= AFS_DIR_BLOCK_SIZE; + } while (len > 0); - if (unlikely(!req->nr_pages)) - return; + return len; +} - rcu_read_lock(); - xas_for_each(&xas, folio, last) { - if (xas_retry(&xas, folio)) - continue; - BUG_ON(xa_is_value(folio)); - ASSERTCMP(folio->mapping, ==, mapping); +/* + * Dump the contents of a directory. 
+ */ +static void afs_dir_dump(struct afs_vnode *dvnode) +{ + struct iov_iter iter; + unsigned long long i_size = i_size_read(&dvnode->netfs.inode); - folio_put(folio); - } + pr_warn("DIR %llx:%llx is=%llx\n", + dvnode->fid.vid, dvnode->fid.vnode, i_size); - rcu_read_unlock(); + iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size); + iterate_folioq(&iter, iov_iter_count(&iter), NULL, NULL, + afs_dir_dump_step); } /* * check that a directory folio is valid */ -static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio, - loff_t i_size) +static bool afs_dir_check_block(struct afs_vnode *dvnode, size_t progress, + union afs_xdr_dir_block *block) { - union afs_xdr_dir_block *block; - size_t offset, size; - loff_t pos; + if (block->hdr.magic != AFS_DIR_MAGIC) { + pr_warn("%s(%lx): [%zx] bad magic %04x\n", + __func__, dvnode->netfs.inode.i_ino, + progress, ntohs(block->hdr.magic)); + trace_afs_dir_check_failed(dvnode, progress); + trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic); + return false; + } - /* Determine how many magic numbers there should be in this folio, but - * we must take care because the directory may change size under us. + /* Make sure each block is NUL terminated so we can reasonably + * use string functions on it. The filenames in the folio + * *should* be NUL-terminated anyway. */ - pos = folio_pos(folio); - if (i_size <= pos) - goto checked; - - size = min_t(loff_t, folio_size(folio), i_size - pos); - for (offset = 0; offset < size; offset += sizeof(*block)) { - block = kmap_local_folio(folio, offset); - if (block->hdr.magic != AFS_DIR_MAGIC) { - printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n", - __func__, dvnode->netfs.inode.i_ino, - pos, offset, size, ntohs(block->hdr.magic)); - trace_afs_dir_check_failed(dvnode, pos + offset, i_size); - kunmap_local(block); - trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic); - goto error; - } - - /* Make sure each block is NUL terminated so we can reasonably - * use string functions on it. The filenames in the folio - * *should* be NUL-terminated anyway. - */ - ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0; - - kunmap_local(block); - } -checked: + ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0; afs_stat_v(dvnode, n_read_dir); return true; - -error: - return false; } /* - * Dump the contents of a directory. + * Iterate through a kmapped directory segment, checking the content. 
*/ -static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) +static size_t afs_dir_check_step(void *iter_base, size_t progress, size_t len, + void *priv, void *priv2) { - union afs_xdr_dir_block *block; - struct address_space *mapping = dvnode->netfs.inode.i_mapping; - struct folio *folio; - pgoff_t last = req->nr_pages - 1; - size_t offset, size; - - XA_STATE(xas, &mapping->i_pages, 0); - - pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n", - dvnode->fid.vid, dvnode->fid.vnode, - req->file_size, req->len, req->actual_len); - pr_warn("DIR %llx %x %zx %zx\n", - req->pos, req->nr_pages, - req->iter->iov_offset, iov_iter_count(req->iter)); - - xas_for_each(&xas, folio, last) { - if (xas_retry(&xas, folio)) - continue; + struct afs_vnode *dvnode = priv; - BUG_ON(folio->mapping != mapping); + if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE || + len % AFS_DIR_BLOCK_SIZE)) + return len; - size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio)); - for (offset = 0; offset < size; offset += sizeof(*block)) { - block = kmap_local_folio(folio, offset); - pr_warn("[%02lx] %32phN\n", folio->index + offset, block); - kunmap_local(block); - } - } + do { + if (!afs_dir_check_block(dvnode, progress, iter_base)) + break; + iter_base += AFS_DIR_BLOCK_SIZE; + len -= AFS_DIR_BLOCK_SIZE; + } while (len > 0); + + return len; } /* - * Check all the blocks in a directory. All the folios are held pinned. + * Check all the blocks in a directory. */ -static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req) +static int afs_dir_check(struct afs_vnode *dvnode) { - struct address_space *mapping = dvnode->netfs.inode.i_mapping; - struct folio *folio; - pgoff_t last = req->nr_pages - 1; - int ret = 0; - - XA_STATE(xas, &mapping->i_pages, 0); + struct iov_iter iter; + unsigned long long i_size = i_size_read(&dvnode->netfs.inode); + size_t checked = 0; - if (unlikely(!req->nr_pages)) + if (unlikely(!i_size)) return 0; - rcu_read_lock(); - xas_for_each(&xas, folio, last) { - if (xas_retry(&xas, folio)) - continue; - - BUG_ON(folio->mapping != mapping); - - if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) { - afs_dir_dump(dvnode, req); - ret = -EIO; - break; - } + iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size); + checked = iterate_folioq(&iter, iov_iter_count(&iter), dvnode, NULL, + afs_dir_check_step); + if (checked != i_size) { + afs_dir_dump(dvnode); + return -EIO; } - - rcu_read_unlock(); - return ret; + return 0; } /* @@ -264,134 +226,140 @@ static int afs_dir_open(struct inode *inode, struct file *file) } /* - * Read the directory into the pagecache in one go, scrubbing the previous - * contents. The list of folios is returned, pinning them so that they don't - * get reclaimed during the iteration. + * Read a file in a single download. 
*/ -static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) - __acquires(&dvnode->validate_lock) +static ssize_t afs_do_read_single(struct afs_vnode *dvnode, struct file *file) { - struct address_space *mapping = dvnode->netfs.inode.i_mapping; - struct afs_read *req; + struct iov_iter iter; + ssize_t ret; loff_t i_size; - int nr_pages, i; - int ret; - loff_t remote_size = 0; + bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) && + !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags)); - _enter(""); - - req = kzalloc(sizeof(*req), GFP_KERNEL); - if (!req) - return ERR_PTR(-ENOMEM); - - refcount_set(&req->usage, 1); - req->vnode = dvnode; - req->key = key_get(key); - req->cleanup = afs_dir_read_cleanup; - -expand: i_size = i_size_read(&dvnode->netfs.inode); - if (i_size < remote_size) - i_size = remote_size; - if (i_size < 2048) { - ret = afs_bad(dvnode, afs_file_error_dir_small); - goto error; - } - if (i_size > 2048 * 1024) { - trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); - ret = -EFBIG; - goto error; + if (is_dir) { + if (i_size < AFS_DIR_BLOCK_SIZE) + return afs_bad(dvnode, afs_file_error_dir_small); + if (i_size > AFS_DIR_BLOCK_SIZE * 1024) { + trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); + return -EFBIG; + } + } else { + if (i_size > AFSPATHMAX) { + trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); + return -EFBIG; + } } - _enter("%llu", i_size); + /* Expand the storage. TODO: Shrink the storage too. */ + if (dvnode->directory_size < i_size) { + size_t cur_size = dvnode->directory_size; - nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE; + ret = netfs_alloc_folioq_buffer(NULL, + &dvnode->directory, &cur_size, i_size, + mapping_gfp_mask(dvnode->netfs.inode.i_mapping)); + dvnode->directory_size = cur_size; + if (ret < 0) + return ret; + } - req->actual_len = i_size; /* May change */ - req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ - req->data_version = dvnode->status.data_version; /* May change */ - iov_iter_xarray(&req->def_iter, ITER_DEST, &dvnode->netfs.inode.i_mapping->i_pages, - 0, i_size); - req->iter = &req->def_iter; + iov_iter_folio_queue(&iter, ITER_DEST, dvnode->directory, 0, 0, dvnode->directory_size); - /* Fill in any gaps that we might find where the memory reclaimer has - * been at work and pin all the folios. If there are any gaps, we will - * need to reread the entire directory contents. + /* AFS requires us to perform the read of a directory synchronously as + * a single unit to avoid issues with the directory contents being + * changed between reads. */ - i = req->nr_pages; - while (i < nr_pages) { - struct folio *folio; - - folio = filemap_get_folio(mapping, i); - if (IS_ERR(folio)) { - if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_stat_v(dvnode, n_inval); - folio = __filemap_get_folio(mapping, - i, FGP_LOCK | FGP_CREAT, - mapping->gfp_mask); - if (IS_ERR(folio)) { - ret = PTR_ERR(folio); - goto error; - } - folio_attach_private(folio, (void *)1); - folio_unlock(folio); + ret = netfs_read_single(&dvnode->netfs.inode, file, &iter); + if (ret >= 0) { + i_size = i_size_read(&dvnode->netfs.inode); + if (i_size > ret) { + /* The content has grown, so we need to expand the + * buffer. + */ + ret = -ESTALE; + } else if (is_dir) { + int ret2 = afs_dir_check(dvnode); + + if (ret2 < 0) + ret = ret2; + } else if (i_size < folioq_folio_size(dvnode->directory, 0)) { + /* NUL-terminate a symlink. 
*/ + char *symlink = kmap_local_folio(folioq_folio(dvnode->directory, 0), 0); + + symlink[i_size] = 0; + kunmap_local(symlink); } - - req->nr_pages += folio_nr_pages(folio); - i += folio_nr_pages(folio); } - /* If we're going to reload, we need to lock all the pages to prevent - * races. - */ + return ret; +} + +ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file) +{ + ssize_t ret; + + fscache_use_cookie(afs_vnode_cache(dvnode), false); + ret = afs_do_read_single(dvnode, file); + fscache_unuse_cookie(afs_vnode_cache(dvnode), NULL, NULL); + return ret; +} + +/* + * Read the directory into a folio_queue buffer in one go, scrubbing the + * previous contents. We return -ESTALE if the caller needs to call us again. + */ +ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file) + __acquires(&dvnode->validate_lock) +{ + ssize_t ret; + loff_t i_size; + + i_size = i_size_read(&dvnode->netfs.inode); + ret = -ERESTARTSYS; if (down_read_killable(&dvnode->validate_lock) < 0) goto error; - if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - goto success; + /* We only need to reread the data if it became invalid - or if we + * haven't read it yet. + */ + if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && + test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) { + ret = i_size; + goto valid; + } up_read(&dvnode->validate_lock); if (down_write_killable(&dvnode->validate_lock) < 0) goto error; - if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { - trace_afs_reload_dir(dvnode); - ret = afs_fetch_data(dvnode, req); - if (ret < 0) - goto error_unlock; - - task_io_account_read(PAGE_SIZE * req->nr_pages); - - if (req->len < req->file_size) { - /* The content has grown, so we need to expand the - * buffer. - */ - up_write(&dvnode->validate_lock); - remote_size = req->file_size; - goto expand; - } + if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) + afs_invalidate_cache(dvnode, 0); - /* Validate the data we just read. */ - ret = afs_dir_check(dvnode, req); + if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) || + !test_bit(AFS_VNODE_DIR_READ, &dvnode->flags)) { + trace_afs_reload_dir(dvnode); + ret = afs_read_single(dvnode, file); if (ret < 0) goto error_unlock; // TODO: Trim excess pages set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); + set_bit(AFS_VNODE_DIR_READ, &dvnode->flags); + } else { + ret = i_size; } downgrade_write(&dvnode->validate_lock); -success: - return req; +valid: + return ret; error_unlock: up_write(&dvnode->validate_lock); error: - afs_put_read(req); - _leave(" = %d", ret); - return ERR_PTR(ret); + _leave(" = %zd", ret); + return ret; } /* @@ -399,79 +367,69 @@ error: */ static int afs_dir_iterate_block(struct afs_vnode *dvnode, struct dir_context *ctx, - union afs_xdr_dir_block *block, - unsigned blkoff) + union afs_xdr_dir_block *block) { union afs_xdr_dirent *dire; - unsigned offset, next, curr, nr_slots; + unsigned int blknum, base, hdr, pos, next, nr_slots; size_t nlen; int tmp; - _enter("%llx,%x", ctx->pos, blkoff); + blknum = ctx->pos / AFS_DIR_BLOCK_SIZE; + base = blknum * AFS_DIR_SLOTS_PER_BLOCK; + hdr = (blknum == 0 ? AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS); + pos = DIV_ROUND_UP(ctx->pos, AFS_DIR_DIRENT_SIZE) - base; - curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent); + _enter("%llx,%x", ctx->pos, blknum); /* walk through the block, an entry at a time */ - for (offset = (blkoff == 0 ? 
AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS); - offset < AFS_DIR_SLOTS_PER_BLOCK; - offset = next - ) { + for (unsigned int slot = hdr; slot < AFS_DIR_SLOTS_PER_BLOCK; slot = next) { /* skip entries marked unused in the bitmap */ - if (!(block->hdr.bitmap[offset / 8] & - (1 << (offset % 8)))) { - _debug("ENT[%zu.%u]: unused", - blkoff / sizeof(union afs_xdr_dir_block), offset); - next = offset + 1; - if (offset >= curr) - ctx->pos = blkoff + - next * sizeof(union afs_xdr_dirent); + if (!(block->hdr.bitmap[slot / 8] & + (1 << (slot % 8)))) { + _debug("ENT[%x]: Unused", base + slot); + next = slot + 1; + if (next >= pos) + ctx->pos = (base + next) * sizeof(union afs_xdr_dirent); continue; } /* got a valid entry */ - dire = &block->dirents[offset]; + dire = &block->dirents[slot]; nlen = strnlen(dire->u.name, - sizeof(*block) - - offset * sizeof(union afs_xdr_dirent)); + (unsigned long)(block + 1) - (unsigned long)dire->u.name - 1); if (nlen > AFSNAMEMAX - 1) { - _debug("ENT[%zu]: name too long (len %u/%zu)", - blkoff / sizeof(union afs_xdr_dir_block), - offset, nlen); + _debug("ENT[%x]: Name too long (len %zx)", + base + slot, nlen); return afs_bad(dvnode, afs_file_error_dir_name_too_long); } - _debug("ENT[%zu.%u]: %s %zu \"%s\"", - blkoff / sizeof(union afs_xdr_dir_block), offset, - (offset < curr ? "skip" : "fill"), + _debug("ENT[%x]: %s %zx \"%s\"", + base + slot, (slot < pos ? "skip" : "fill"), nlen, dire->u.name); nr_slots = afs_dir_calc_slots(nlen); - next = offset + nr_slots; + next = slot + nr_slots; if (next > AFS_DIR_SLOTS_PER_BLOCK) { - _debug("ENT[%zu.%u]:" - " %u extends beyond end dir block" - " (len %zu)", - blkoff / sizeof(union afs_xdr_dir_block), - offset, next, nlen); + _debug("ENT[%x]: extends beyond end dir block (len %zx)", + base + slot, nlen); return afs_bad(dvnode, afs_file_error_dir_over_end); } /* Check that the name-extension dirents are all allocated */ for (tmp = 1; tmp < nr_slots; tmp++) { - unsigned int ix = offset + tmp; - if (!(block->hdr.bitmap[ix / 8] & (1 << (ix % 8)))) { - _debug("ENT[%zu.u]:" - " %u unmarked extension (%u/%u)", - blkoff / sizeof(union afs_xdr_dir_block), - offset, tmp, nr_slots); + unsigned int xslot = slot + tmp; + + if (!(block->hdr.bitmap[xslot / 8] & (1 << (xslot % 8)))) { + _debug("ENT[%x]: Unmarked extension (%x/%x)", + base + slot, tmp, nr_slots); return afs_bad(dvnode, afs_file_error_dir_unmarked_ext); } } /* skip if starts before the current position */ - if (offset < curr) { - if (next > curr) - ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); + if (slot < pos) { + if (next > pos) + ctx->pos = (base + next) * sizeof(union afs_xdr_dirent); continue; } @@ -485,75 +443,110 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode, return 0; } - ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent); + ctx->pos = (base + next) * sizeof(union afs_xdr_dirent); } _leave(" = 1 [more]"); return 1; } +struct afs_dir_iteration_ctx { + struct dir_context *dir_ctx; + int error; +}; + /* - * iterate through the data blob that lists the contents of an AFS directory + * Iterate through a kmapped directory segment. 
*/ -static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, - struct key *key, afs_dataversion_t *_dir_version) +static size_t afs_dir_iterate_step(void *iter_base, size_t progress, size_t len, + void *priv, void *priv2) { - struct afs_vnode *dvnode = AFS_FS_I(dir); - union afs_xdr_dir_block *dblock; - struct afs_read *req; - struct folio *folio; - unsigned offset, size; + struct afs_dir_iteration_ctx *ctx = priv2; + struct afs_vnode *dvnode = priv; int ret; - _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos); - - if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { - _leave(" = -ESTALE"); - return -ESTALE; + if (WARN_ON_ONCE(progress % AFS_DIR_BLOCK_SIZE || + len % AFS_DIR_BLOCK_SIZE)) { + pr_err("Mis-iteration prog=%zx len=%zx\n", + progress % AFS_DIR_BLOCK_SIZE, + len % AFS_DIR_BLOCK_SIZE); + return len; } - req = afs_read_dir(dvnode, key); - if (IS_ERR(req)) - return PTR_ERR(req); - *_dir_version = req->data_version; + do { + ret = afs_dir_iterate_block(dvnode, ctx->dir_ctx, iter_base); + if (ret != 1) + break; - /* round the file position up to the next entry boundary */ - ctx->pos += sizeof(union afs_xdr_dirent) - 1; - ctx->pos &= ~(sizeof(union afs_xdr_dirent) - 1); + ctx->dir_ctx->pos = round_up(ctx->dir_ctx->pos, AFS_DIR_BLOCK_SIZE); + iter_base += AFS_DIR_BLOCK_SIZE; + len -= AFS_DIR_BLOCK_SIZE; + } while (len > 0); - /* walk through the blocks in sequence */ - ret = 0; - while (ctx->pos < req->actual_len) { - /* Fetch the appropriate folio from the directory and re-add it - * to the LRU. We have all the pages pinned with an extra ref. - */ - folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE, - FGP_ACCESSED, 0); - if (IS_ERR(folio)) { - ret = afs_bad(dvnode, afs_file_error_dir_missing_page); - break; - } + return len; +} + +/* + * Iterate through the directory folios. 
+ */ +static int afs_dir_iterate_contents(struct inode *dir, struct dir_context *dir_ctx) +{ + struct afs_dir_iteration_ctx ctx = { .dir_ctx = dir_ctx }; + struct afs_vnode *dvnode = AFS_FS_I(dir); + struct iov_iter iter; + unsigned long long i_size = i_size_read(dir); - offset = round_down(ctx->pos, sizeof(*dblock)) - folio_pos(folio); - size = min_t(loff_t, folio_size(folio), - req->actual_len - folio_pos(folio)); + /* Round the file position up to the next entry boundary */ + dir_ctx->pos = round_up(dir_ctx->pos, sizeof(union afs_xdr_dirent)); - do { - dblock = kmap_local_folio(folio, offset); - ret = afs_dir_iterate_block(dvnode, ctx, dblock, - folio_pos(folio) + offset); - kunmap_local(dblock); - if (ret != 1) - goto out; + if (i_size <= 0 || dir_ctx->pos >= i_size) + return 0; - } while (offset += sizeof(*dblock), offset < size); + iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, i_size); + iov_iter_advance(&iter, round_down(dir_ctx->pos, AFS_DIR_BLOCK_SIZE)); - ret = 0; - } + iterate_folioq(&iter, iov_iter_count(&iter), dvnode, &ctx, + afs_dir_iterate_step); + + if (ctx.error == -ESTALE) + afs_invalidate_dir(dvnode, afs_dir_invalid_iter_stale); + return ctx.error; +} + +/* + * iterate through the data blob that lists the contents of an AFS directory + */ +static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, + struct file *file, afs_dataversion_t *_dir_version) +{ + struct afs_vnode *dvnode = AFS_FS_I(dir); + int retry_limit = 100; + int ret; + + _enter("{%lu},%llx,,", dir->i_ino, ctx->pos); + + do { + if (--retry_limit < 0) { + pr_warn("afs_read_dir(): Too many retries\n"); + ret = -ESTALE; + break; + } + ret = afs_read_dir(dvnode, file); + if (ret < 0) { + if (ret != -ESTALE) + break; + if (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(dir)->flags)) { + ret = -ESTALE; + break; + } + continue; + } + *_dir_version = inode_peek_iversion_raw(dir); + + ret = afs_dir_iterate_contents(dir, ctx); + up_read(&dvnode->validate_lock); + } while (ret == -ESTALE); -out: - up_read(&dvnode->validate_lock); - afs_put_read(req); _leave(" = %d", ret); return ret; } @@ -565,8 +558,7 @@ static int afs_readdir(struct file *file, struct dir_context *ctx) { afs_dataversion_t dir_version; - return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file), - &dir_version); + return afs_dir_iterate(file_inode(file), ctx, file, &dir_version); } /* @@ -606,22 +598,22 @@ static bool afs_lookup_one_filldir(struct dir_context *ctx, const char *name, * Do a lookup of a single name in a directory * - just returns the FID the dentry name maps to if found */ -static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, - struct afs_fid *fid, struct key *key, +static int afs_do_lookup_one(struct inode *dir, const struct qstr *name, + struct afs_fid *fid, afs_dataversion_t *_dir_version) { struct afs_super_info *as = dir->i_sb->s_fs_info; struct afs_lookup_one_cookie cookie = { .ctx.actor = afs_lookup_one_filldir, - .name = dentry->d_name, + .name = *name, .fid.vid = as->volume->vid }; int ret; - _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); + _enter("{%lu},{%.*s},", dir->i_ino, name->len, name->name); /* search the directory */ - ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version); + ret = afs_dir_iterate(dir, &cookie.ctx, NULL, _dir_version); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; @@ -656,19 +648,10 @@ static bool afs_lookup_filldir(struct dir_context *ctx, const char *name, BUILD_BUG_ON(sizeof(union afs_xdr_dir_block) != 2048); 
BUILD_BUG_ON(sizeof(union afs_xdr_dirent) != 32); - if (cookie->found) { - if (cookie->nr_fids < 50) { - cookie->fids[cookie->nr_fids].vnode = ino; - cookie->fids[cookie->nr_fids].unique = dtype; - cookie->nr_fids++; - } - } else if (cookie->name.len == nlen && - memcmp(cookie->name.name, name, nlen) == 0) { - cookie->fids[1].vnode = ino; - cookie->fids[1].unique = dtype; - cookie->found = 1; - if (cookie->one_only) - return false; + if (cookie->nr_fids < 50) { + cookie->fids[cookie->nr_fids].vnode = ino; + cookie->fids[cookie->nr_fids].unique = dtype; + cookie->nr_fids++; } return cookie->nr_fids < 50; @@ -788,8 +771,7 @@ static bool afs_server_supports_ibulk(struct afs_vnode *dvnode) * files in one go and create inodes for them. The inode of the file we were * asked for is returned. */ -static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, - struct key *key) +static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry) { struct afs_lookup_cookie *cookie; struct afs_vnode_param *vp; @@ -797,6 +779,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct inode *inode = NULL, *ti; afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); + bool supports_ibulk; long ret; int i; @@ -813,19 +796,19 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, cookie->nr_fids = 2; /* slot 1 is saved for the fid we actually want * and slot 0 for the directory */ - if (!afs_server_supports_ibulk(dvnode)) - cookie->one_only = true; - - /* search the directory */ - ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); + /* Search the directory for the named entry using the hash table... */ + ret = afs_dir_search(dvnode, &dentry->d_name, &cookie->fids[1], &data_version); if (ret < 0) goto out; - dentry->d_fsdata = (void *)(unsigned long)data_version; + supports_ibulk = afs_server_supports_ibulk(dvnode); + if (supports_ibulk) { + /* ...then scan linearly from that point for entries to lookup-ahead. */ + cookie->ctx.pos = (ret + 1) * AFS_DIR_DIRENT_SIZE; + afs_dir_iterate(dir, &cookie->ctx, NULL, &data_version); + } - ret = -ENOENT; - if (!cookie->found) - goto out; + dentry->d_fsdata = (void *)(unsigned long)data_version; /* Check to see if we already have an inode for the primary fid. */ inode = ilookup5(dir->i_sb, cookie->fids[1].vnode, @@ -884,7 +867,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, * the whole operation. */ afs_op_set_error(op, -ENOTSUPP); - if (!cookie->one_only) { + if (supports_ibulk) { op->ops = &afs_inline_bulk_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); @@ -926,8 +909,7 @@ out: /* * Look up an entry in a directory with @sys substitution. 
*/ -static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry, - struct key *key) +static struct dentry *afs_lookup_atsys(struct inode *dir, struct dentry *dentry) { struct afs_sysnames *subs; struct afs_net *net = afs_i2net(dir); @@ -975,7 +957,6 @@ out_s: afs_put_sysnames(subs); kfree(buf); out_p: - key_put(key); return ret; } @@ -989,7 +970,6 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, struct afs_fid fid = {}; struct inode *inode; struct dentry *d; - struct key *key; int ret; _enter("{%llx:%llu},%p{%pd},", @@ -1007,15 +987,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, return ERR_PTR(-ESTALE); } - key = afs_request_key(dvnode->volume->cell); - if (IS_ERR(key)) { - _leave(" = %ld [key]", PTR_ERR(key)); - return ERR_CAST(key); - } - - ret = afs_validate(dvnode, key); + ret = afs_validate(dvnode, NULL); if (ret < 0) { - key_put(key); + afs_dir_unuse_cookie(dvnode, ret); _leave(" = %d [val]", ret); return ERR_PTR(ret); } @@ -1025,11 +999,10 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, dentry->d_name.name[dentry->d_name.len - 3] == 's' && dentry->d_name.name[dentry->d_name.len - 2] == 'y' && dentry->d_name.name[dentry->d_name.len - 1] == 's') - return afs_lookup_atsys(dir, dentry, key); + return afs_lookup_atsys(dir, dentry); afs_stat_v(dvnode, n_lookup); - inode = afs_do_lookup(dir, dentry, key); - key_put(key); + inode = afs_do_lookup(dir, dentry); if (inode == ERR_PTR(-ENOENT)) inode = afs_try_auto_mntpt(dentry, dir); @@ -1051,21 +1024,12 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, /* * Check the validity of a dentry under RCU conditions. */ -static int afs_d_revalidate_rcu(struct dentry *dentry) +static int afs_d_revalidate_rcu(struct afs_vnode *dvnode, struct dentry *dentry) { - struct afs_vnode *dvnode; - struct dentry *parent; - struct inode *dir; long dir_version, de_version; _enter("%p", dentry); - /* Check the parent directory is still valid first. */ - parent = READ_ONCE(dentry->d_parent); - dir = d_inode_rcu(parent); - if (!dir) - return -ECHILD; - dvnode = AFS_FS_I(dir); if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) return -ECHILD; @@ -1093,11 +1057,11 @@ static int afs_d_revalidate_rcu(struct dentry *dentry) * - NOTE! 
the hit can be a negative hit too, so we can't assume we have an * inode */ -static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int afs_d_revalidate(struct inode *parent_dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - struct afs_vnode *vnode, *dir; + struct afs_vnode *vnode, *dir = AFS_FS_I(parent_dir); struct afs_fid fid; - struct dentry *parent; struct inode *inode; struct key *key; afs_dataversion_t dir_version, invalid_before; @@ -1105,7 +1069,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) int ret; if (flags & LOOKUP_RCU) - return afs_d_revalidate_rcu(dentry); + return afs_d_revalidate_rcu(dir, dentry); if (d_really_is_positive(dentry)) { vnode = AFS_FS_I(d_inode(dentry)); @@ -1120,14 +1084,9 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) if (IS_ERR(key)) key = NULL; - /* Hold the parent dentry so we can peer at it */ - parent = dget_parent(dentry); - dir = AFS_FS_I(d_inode(parent)); - /* validate the parent directory */ ret = afs_validate(dir, key); if (ret == -ERESTARTSYS) { - dput(parent); key_put(key); return ret; } @@ -1155,7 +1114,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) afs_stat_v(dir, n_reval); /* search the directory for this vnode */ - ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version); + ret = afs_do_lookup_one(&dir->netfs.inode, name, &fid, &dir_version); switch (ret) { case 0: /* the filename maps to something */ @@ -1199,22 +1158,19 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) goto out_valid; default: - _debug("failed to iterate dir %pd: %d", - parent, ret); + _debug("failed to iterate parent %pd2: %d", dentry, ret); goto not_found; } out_valid: dentry->d_fsdata = (void *)(unsigned long)dir_version; out_valid_noupdate: - dput(parent); key_put(key); _leave(" = 1 [valid]"); return 1; not_found: _debug("dropping dentry %pd2", dentry); - dput(parent); key_put(key); _leave(" = 0 [bad]"); @@ -1282,6 +1238,7 @@ void afs_check_for_remote_deletion(struct afs_operation *op) */ static void afs_vnode_new_inode(struct afs_operation *op) { + struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_vnode *vnode; struct inode *inode; @@ -1301,6 +1258,10 @@ static void afs_vnode_new_inode(struct afs_operation *op) vnode = AFS_FS_I(inode); set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); + if (S_ISDIR(inode->i_mode)) + afs_mkdir_init_dir(vnode, dvp->vnode); + else if (S_ISLNK(inode->i_mode)) + afs_init_new_symlink(vnode, op); if (!afs_op_error(op)) afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb); d_instantiate(op->dentry, inode); @@ -1317,18 +1278,21 @@ static void afs_create_success(struct afs_operation *op) static void afs_create_edit_dir(struct afs_operation *op) { + struct netfs_cache_resources cres = {}; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode_param *vp = &op->file[1]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); + fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode)); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid, op->create.reason); up_write(&dvnode->validate_lock); + fscache_end_operation(&cres); } static void afs_create_put(struct afs_operation *op) @@ -1356,6 +1320,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, 
struct inode *dir, { struct afs_operation *op; struct afs_vnode *dvnode = AFS_FS_I(dir); + int ret; _enter("{%llx:%llu},{%pd},%ho", dvnode->fid.vid, dvnode->fid.vnode, dentry, mode); @@ -1366,6 +1331,8 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir, return PTR_ERR(op); } + fscache_use_cookie(afs_vnode_cache(dvnode), true); + afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; @@ -1375,7 +1342,9 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir, op->create.reason = afs_edit_dir_for_mkdir; op->mtime = current_time(dir); op->ops = &afs_mkdir_operation; - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + afs_dir_unuse_cookie(dvnode, ret); + return ret; } /* @@ -1388,8 +1357,8 @@ static void afs_dir_remove_subdir(struct dentry *dentry) clear_nlink(&vnode->netfs.inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); - atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_clear_cb_promise(vnode, afs_cb_promise_clear_rmdir); + afs_invalidate_dir(vnode, afs_dir_invalid_subdir_removed); } } @@ -1403,18 +1372,21 @@ static void afs_rmdir_success(struct afs_operation *op) static void afs_rmdir_edit_dir(struct afs_operation *op) { + struct netfs_cache_resources cres = {}; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); afs_dir_remove_subdir(op->dentry); + fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode)); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_remove(dvnode, &op->dentry->d_name, afs_edit_dir_for_rmdir); up_write(&dvnode->validate_lock); + fscache_end_operation(&cres); } static void afs_rmdir_put(struct afs_operation *op) @@ -1449,6 +1421,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) if (IS_ERR(op)) return PTR_ERR(op); + fscache_use_cookie(afs_vnode_cache(dvnode), true); + afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; @@ -1472,10 +1446,18 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) op->file[1].vnode = vnode; } - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + + /* Not all systems that can host afs servers have ENOTEMPTY. 
*/ + if (ret == -EEXIST) + ret = -ENOTEMPTY; +out: + afs_dir_unuse_cookie(dvnode, ret); + return ret; error: - return afs_put_operation(op); + ret = afs_put_operation(op); + goto out; } /* @@ -1538,16 +1520,19 @@ static void afs_unlink_success(struct afs_operation *op) static void afs_unlink_edit_dir(struct afs_operation *op) { + struct netfs_cache_resources cres = {}; struct afs_vnode_param *dvp = &op->file[0]; struct afs_vnode *dvnode = dvp->vnode; _enter("op=%08x", op->debug_id); + fscache_begin_write_operation(&cres, afs_vnode_cache(dvnode)); down_write(&dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) && dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) afs_edit_dir_remove(dvnode, &op->dentry->d_name, afs_edit_dir_for_unlink); up_write(&dvnode->validate_lock); + fscache_end_operation(&cres); } static void afs_unlink_put(struct afs_operation *op) @@ -1586,6 +1571,8 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) if (IS_ERR(op)) return PTR_ERR(op); + fscache_use_cookie(afs_vnode_cache(dvnode), true); + afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; @@ -1632,10 +1619,10 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) afs_wait_for_operation(op); } - return afs_put_operation(op); - error: - return afs_put_operation(op); + ret = afs_put_operation(op); + afs_dir_unuse_cookie(dvnode, ret); + return ret; } static const struct afs_operation_ops afs_create_operation = { @@ -1669,6 +1656,8 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir, goto error; } + fscache_use_cookie(afs_vnode_cache(dvnode), true); + afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; op->file[0].modification = true; @@ -1679,7 +1668,9 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir, op->create.reason = afs_edit_dir_for_create; op->mtime = current_time(dir); op->ops = &afs_create_operation; - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + afs_dir_unuse_cookie(dvnode, ret); + return ret; error: d_drop(dentry); @@ -1744,6 +1735,8 @@ static int afs_link(struct dentry *from, struct inode *dir, goto error; } + fscache_use_cookie(afs_vnode_cache(dvnode), true); + ret = afs_validate(vnode, op->key); if (ret < 0) goto error_op; @@ -1759,10 +1752,13 @@ static int afs_link(struct dentry *from, struct inode *dir, op->dentry_2 = from; op->ops = &afs_link_operation; op->create.reason = afs_edit_dir_for_link; - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + afs_dir_unuse_cookie(dvnode, ret); + return ret; error_op: afs_put_operation(op); + afs_dir_unuse_cookie(dvnode, ret); error: d_drop(dentry); _leave(" = %d", ret); @@ -1806,6 +1802,8 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, goto error; } + fscache_use_cookie(afs_vnode_cache(dvnode), true); + afs_op_set_vnode(op, 0, dvnode); op->file[0].dv_delta = 1; @@ -1814,7 +1812,9 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir, op->create.reason = afs_edit_dir_for_symlink; op->create.symlink = content; op->mtime = current_time(dir); - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + afs_dir_unuse_cookie(dvnode, ret); + return ret; error: d_drop(dentry); @@ -1846,6 +1846,7 @@ static void afs_rename_success(struct afs_operation *op) write_seqlock(&vnode->cb_lock); new_dv = vnode->status.data_version + 1; + trace_afs_set_dv(vnode, new_dv); vnode->status.data_version = new_dv; 
inode_set_iversion_raw(&vnode->netfs.inode, new_dv); @@ -1855,6 +1856,7 @@ static void afs_rename_success(struct afs_operation *op) static void afs_rename_edit_dir(struct afs_operation *op) { + struct netfs_cache_resources orig_cres = {}, new_cres = {}; struct afs_vnode_param *orig_dvp = &op->file[0]; struct afs_vnode_param *new_dvp = &op->file[1]; struct afs_vnode *orig_dvnode = orig_dvp->vnode; @@ -1871,6 +1873,10 @@ static void afs_rename_edit_dir(struct afs_operation *op) op->rename.rehash = NULL; } + fscache_begin_write_operation(&orig_cres, afs_vnode_cache(orig_dvnode)); + if (new_dvnode != orig_dvnode) + fscache_begin_write_operation(&new_cres, afs_vnode_cache(new_dvnode)); + down_write(&orig_dvnode->validate_lock); if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) && orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta) @@ -1920,6 +1926,9 @@ static void afs_rename_edit_dir(struct afs_operation *op) d_move(old_dentry, new_dentry); up_write(&new_dvnode->validate_lock); + fscache_end_operation(&orig_cres); + if (new_dvnode != orig_dvnode) + fscache_end_operation(&new_cres); } static void afs_rename_put(struct afs_operation *op) @@ -1972,6 +1981,10 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir, if (IS_ERR(op)) return PTR_ERR(op); + fscache_use_cookie(afs_vnode_cache(orig_dvnode), true); + if (new_dvnode != orig_dvnode) + fscache_use_cookie(afs_vnode_cache(new_dvnode), true); + ret = afs_validate(vnode, op->key); afs_op_set_error(op, ret); if (ret < 0) @@ -2039,47 +2052,43 @@ static int afs_rename(struct mnt_idmap *idmap, struct inode *old_dir, */ d_drop(old_dentry); - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); +out: + afs_dir_unuse_cookie(orig_dvnode, ret); + if (new_dvnode != orig_dvnode) + afs_dir_unuse_cookie(new_dvnode, ret); + return ret; error: - return afs_put_operation(op); -} - -/* - * Release a directory folio and clean up its private state if it's not busy - * - return true if the folio can now be released, false if not - */ -static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags) -{ - struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio)); - - _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index); - - folio_detach_private(folio); - - /* The directory will need reloading. */ - if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_stat_v(dvnode, n_relpg); - return true; + ret = afs_put_operation(op); + goto out; } /* - * Invalidate part or all of a folio. + * Write the file contents to the cache as a single blob. */ -static void afs_dir_invalidate_folio(struct folio *folio, size_t offset, - size_t length) +int afs_single_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio)); - - _enter("{%lu},%zu,%zu", folio->index, offset, length); - - BUG_ON(!folio_test_locked(folio)); + struct afs_vnode *dvnode = AFS_FS_I(mapping->host); + struct iov_iter iter; + bool is_dir = (S_ISDIR(dvnode->netfs.inode.i_mode) && + !test_bit(AFS_VNODE_MOUNTPOINT, &dvnode->flags)); + int ret = 0; - /* The directory will need reloading. */ - if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) - afs_stat_v(dvnode, n_inval); + /* Need to lock to prevent the folio queue and folios from being thrown + * away. + */ + down_read(&dvnode->validate_lock); + + if (is_dir ? 
+ test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) : + atomic64_read(&dvnode->cb_expires_at) != AFS_NO_CB_PROMISE) { + iov_iter_folio_queue(&iter, ITER_SOURCE, dvnode->directory, 0, 0, + i_size_read(&dvnode->netfs.inode)); + ret = netfs_writeback_single(mapping, wbc, &iter); + } - /* we clean up only if the entire folio is being invalidated */ - if (offset == 0 && length == folio_size(folio)) - folio_detach_private(folio); + up_read(&dvnode->validate_lock); + return ret; } diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c index fe223fb78111..60a549f1d9c5 100644 --- a/fs/afs/dir_edit.c +++ b/fs/afs/dir_edit.c @@ -10,6 +10,7 @@ #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/iversion.h> +#include <linux/folio_queue.h> #include "internal.h" #include "xdr_fs.h" @@ -105,23 +106,57 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block, } /* - * Get a new directory folio. + * Get a specific block, extending the directory storage to cover it as needed. */ -static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index) +static union afs_xdr_dir_block *afs_dir_get_block(struct afs_dir_iter *iter, size_t block) { - struct address_space *mapping = vnode->netfs.inode.i_mapping; + struct folio_queue *fq; + struct afs_vnode *dvnode = iter->dvnode; struct folio *folio; + size_t blpos = block * AFS_DIR_BLOCK_SIZE; + size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos; + int ret; + + if (dvnode->directory_size < blend) { + size_t cur_size = dvnode->directory_size; + + ret = netfs_alloc_folioq_buffer( + NULL, &dvnode->directory, &cur_size, blend, + mapping_gfp_mask(dvnode->netfs.inode.i_mapping)); + dvnode->directory_size = cur_size; + if (ret < 0) + goto fail; + } - folio = __filemap_get_folio(mapping, index, - FGP_LOCK | FGP_ACCESSED | FGP_CREAT, - mapping->gfp_mask); - if (IS_ERR(folio)) { - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); - return NULL; + fq = iter->fq; + if (!fq) + fq = dvnode->directory; + + /* Search the folio queue for the folio containing the block... */ + for (; fq; fq = fq->next) { + for (int s = iter->fq_slot; s < folioq_count(fq); s++) { + size_t fsize = folioq_folio_size(fq, s); + + if (blend <= fpos + fsize) { + /* ... and then return the mapped block. 
*/ + folio = folioq_folio(fq, s); + if (WARN_ON_ONCE(folio_pos(folio) != fpos)) + goto fail; + iter->fq = fq; + iter->fq_slot = s; + iter->fpos = fpos; + return kmap_local_folio(folio, blpos - fpos); + } + fpos += fsize; + } + iter->fq_slot = 0; } - if (!folio_test_private(folio)) - folio_attach_private(folio, (void *)1); - return folio; + +fail: + iter->fq = NULL; + iter->fq_slot = 0; + afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block); + return NULL; } /* @@ -209,9 +244,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode, { union afs_xdr_dir_block *meta, *block; union afs_xdr_dirent *de; - struct folio *folio0, *folio; - unsigned int need_slots, nr_blocks, b; - pgoff_t index; + struct afs_dir_iter iter = { .dvnode = vnode }; + unsigned int nr_blocks, b, entry; loff_t i_size; int slot; @@ -220,20 +254,17 @@ void afs_edit_dir_add(struct afs_vnode *vnode, i_size = i_size_read(&vnode->netfs.inode); if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_bad_size); return; } - folio0 = afs_dir_get_folio(vnode, 0); - if (!folio0) { - _leave(" [fgp]"); + meta = afs_dir_get_block(&iter, 0); + if (!meta) return; - } /* Work out how many slots we're going to need. */ - need_slots = afs_dir_calc_slots(name->len); + iter.nr_slots = afs_dir_calc_slots(name->len); - meta = kmap_local_folio(folio0, 0); if (i_size == 0) goto new_directory; nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; @@ -245,22 +276,21 @@ void afs_edit_dir_add(struct afs_vnode *vnode, /* If the directory extended into a new folio, then we need to * tack a new folio on the end. */ - index = b / AFS_DIR_BLOCKS_PER_PAGE; if (nr_blocks >= AFS_DIR_MAX_BLOCKS) - goto error; - if (index >= folio_nr_pages(folio0)) { - folio = afs_dir_get_folio(vnode, index); - if (!folio) - goto error; - } else { - folio = folio0; - } + goto error_too_many_blocks; - block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio)); + /* Lower dir blocks have a counter in the header we can check. */ + if (b < AFS_DIR_BLOCKS_WITH_CTR && + meta->meta.alloc_ctrs[b] < iter.nr_slots) + continue; + + block = afs_dir_get_block(&iter, b); + if (!block) + goto error; /* Abandon the edit if we got a callback break. */ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) - goto invalidated; + goto already_invalidated; _debug("block %u: %2u %3u %u", b, @@ -275,31 +305,23 @@ void afs_edit_dir_add(struct afs_vnode *vnode, afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE); } - /* Only lower dir blocks have a counter in the header. */ - if (b >= AFS_DIR_BLOCKS_WITH_CTR || - meta->meta.alloc_ctrs[b] >= need_slots) { - /* We need to try and find one or more consecutive - * slots to hold the entry. - */ - slot = afs_find_contig_bits(block, need_slots); - if (slot >= 0) { - _debug("slot %u", slot); - goto found_space; - } + /* We need to try and find one or more consecutive slots to + * hold the entry. + */ + slot = afs_find_contig_bits(block, iter.nr_slots); + if (slot >= 0) { + _debug("slot %u", slot); + goto found_space; } kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } } /* There are no spare slots of sufficient size, yet the operation * succeeded. Download the directory again. 
*/ trace_afs_edit_dir(vnode, why, afs_edit_dir_create_nospc, 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_no_slots); goto out_unmap; new_directory: @@ -307,8 +329,7 @@ new_directory: i_size = AFS_DIR_BLOCK_SIZE; afs_set_i_size(vnode, i_size); slot = AFS_DIR_RESV_BLOCKS0; - folio = folio0; - block = kmap_local_folio(folio, 0); + block = afs_dir_get_block(&iter, 0); nr_blocks = 1; b = 0; @@ -326,41 +347,39 @@ found_space: de->u.name[name->len] = 0; /* Adjust the bitmap. */ - afs_set_contig_bits(block, slot, need_slots); - kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } + afs_set_contig_bits(block, slot, iter.nr_slots); /* Adjust the allocation counter. */ if (b < AFS_DIR_BLOCKS_WITH_CTR) - meta->meta.alloc_ctrs[b] -= need_slots; + meta->meta.alloc_ctrs[b] -= iter.nr_slots; + + /* Adjust the hash chain. */ + entry = b * AFS_DIR_SLOTS_PER_BLOCK + slot; + iter.bucket = afs_dir_hash_name(name); + de->u.hash_next = meta->meta.hashtable[iter.bucket]; + meta->meta.hashtable[iter.bucket] = htons(entry); + kunmap_local(block); inode_inc_iversion_raw(&vnode->netfs.inode); afs_stat_v(vnode, n_dir_cr); _debug("Insert %s in %u[%u]", name->name, b, slot); + netfs_single_mark_inode_dirty(&vnode->netfs.inode); + out_unmap: kunmap_local(meta); - folio_unlock(folio0); - folio_put(folio0); _leave(""); return; -invalidated: +already_invalidated: trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } goto out_unmap; +error_too_many_blocks: + afs_invalidate_dir(vnode, afs_dir_invalid_edit_add_too_many_blocks); error: trace_afs_edit_dir(vnode, why, afs_edit_dir_create_error, 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; } @@ -374,13 +393,14 @@ error: void afs_edit_dir_remove(struct afs_vnode *vnode, struct qstr *name, enum afs_edit_dir_reason why) { - union afs_xdr_dir_block *meta, *block; - union afs_xdr_dirent *de; - struct folio *folio0, *folio; - unsigned int need_slots, nr_blocks, b; - pgoff_t index; + union afs_xdr_dir_block *meta, *block, *pblock; + union afs_xdr_dirent *de, *pde; + struct afs_dir_iter iter = { .dvnode = vnode }; + struct afs_fid fid; + unsigned int b, slot, entry; loff_t i_size; - int slot; + __be16 next; + int found; _enter(",,{%d,%s},", name->len, name->name); @@ -388,81 +408,95 @@ void afs_edit_dir_remove(struct afs_vnode *vnode, if (i_size < AFS_DIR_BLOCK_SIZE || i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_bad_size); return; } - nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; - folio0 = afs_dir_get_folio(vnode, 0); - if (!folio0) { - _leave(" [fgp]"); + if (!afs_dir_init_iter(&iter, name)) return; - } - - /* Work out how many slots we're going to discard. */ - need_slots = afs_dir_calc_slots(name->len); - - meta = kmap_local_folio(folio0, 0); - - /* Find a block that has sufficient slots available. Each folio - * contains two or more directory blocks. 
- */ - for (b = 0; b < nr_blocks; b++) { - index = b / AFS_DIR_BLOCKS_PER_PAGE; - if (index >= folio_nr_pages(folio0)) { - folio = afs_dir_get_folio(vnode, index); - if (!folio) - goto error; - } else { - folio = folio0; - } - block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio)); - - /* Abandon the edit if we got a callback break. */ - if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) - goto invalidated; - - if (b > AFS_DIR_BLOCKS_WITH_CTR || - meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) { - slot = afs_dir_scan_block(block, name, b); - if (slot >= 0) - goto found_dirent; - } + meta = afs_dir_find_block(&iter, 0); + if (!meta) + return; - kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } + /* Find the entry in the blob. */ + found = afs_dir_search_bucket(&iter, name, &fid); + if (found < 0) { + /* Didn't find the dirent to clobber. Re-download. */ + trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent, + 0, 0, 0, 0, name->name); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_rem_wrong_name); + goto out_unmap; } - /* Didn't find the dirent to clobber. Download the directory again. */ - trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_noent, - 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); - goto out_unmap; + entry = found; + b = entry / AFS_DIR_SLOTS_PER_BLOCK; + slot = entry % AFS_DIR_SLOTS_PER_BLOCK; -found_dirent: + block = afs_dir_find_block(&iter, b); + if (!block) + goto error; + if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) + goto already_invalidated; + + /* Check and clear the entry. */ de = &block->dirents[slot]; + if (de->u.valid != 1) + goto error_unmap; trace_afs_edit_dir(vnode, why, afs_edit_dir_delete, b, slot, ntohl(de->u.vnode), ntohl(de->u.unique), name->name); - memset(de, 0, sizeof(*de) * need_slots); - /* Adjust the bitmap. */ - afs_clear_contig_bits(block, slot, need_slots); - kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } + afs_clear_contig_bits(block, slot, iter.nr_slots); /* Adjust the allocation counter. */ if (b < AFS_DIR_BLOCKS_WITH_CTR) - meta->meta.alloc_ctrs[b] += need_slots; + meta->meta.alloc_ctrs[b] += iter.nr_slots; + + /* Clear the constituent entries. */ + next = de->u.hash_next; + memset(de, 0, sizeof(*de) * iter.nr_slots); + kunmap_local(block); + + /* Adjust the hash chain: if iter->prev_entry is 0, the hashtable head + * index is previous; otherwise it's slot number of the previous entry. 
+ */ + if (!iter.prev_entry) { + __be16 prev_next = meta->meta.hashtable[iter.bucket]; + + if (unlikely(prev_next != htons(entry))) { + pr_warn("%llx:%llx:%x: not head of chain b=%x p=%x,%x e=%x %*s", + vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, + iter.bucket, iter.prev_entry, prev_next, entry, + name->len, name->name); + goto error; + } + meta->meta.hashtable[iter.bucket] = next; + } else { + unsigned int pb = iter.prev_entry / AFS_DIR_SLOTS_PER_BLOCK; + unsigned int ps = iter.prev_entry % AFS_DIR_SLOTS_PER_BLOCK; + __be16 prev_next; + + pblock = afs_dir_find_block(&iter, pb); + if (!pblock) + goto error; + pde = &pblock->dirents[ps]; + prev_next = pde->u.hash_next; + if (prev_next != htons(entry)) { + kunmap_local(pblock); + pr_warn("%llx:%llx:%x: not prev in chain b=%x p=%x,%x e=%x %*s", + vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, + iter.bucket, iter.prev_entry, prev_next, entry, + name->len, name->name); + goto error; + } + pde->u.hash_next = next; + kunmap_local(pblock); + } + + netfs_single_mark_inode_dirty(&vnode->netfs.inode); inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version); afs_stat_v(vnode, n_dir_rm); @@ -470,26 +504,20 @@ found_dirent: out_unmap: kunmap_local(meta); - folio_unlock(folio0); - folio_put(folio0); _leave(""); return; -invalidated: +already_invalidated: + kunmap_local(block); trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval, 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); - kunmap_local(block); - if (folio != folio0) { - folio_unlock(folio); - folio_put(folio); - } goto out_unmap; +error_unmap: + kunmap_local(block); error: trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_error, 0, 0, 0, 0, name->name); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out_unmap; } @@ -502,9 +530,8 @@ void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_d { union afs_xdr_dir_block *block; union afs_xdr_dirent *de; - struct folio *folio; + struct afs_dir_iter iter = { .dvnode = vnode }; unsigned int nr_blocks, b; - pgoff_t index; loff_t i_size; int slot; @@ -512,39 +539,35 @@ void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_d i_size = i_size_read(&vnode->netfs.inode); if (i_size < AFS_DIR_BLOCK_SIZE) { - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_bad_size); return; } + nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; /* Find a block that has sufficient slots available. Each folio * contains two or more directory blocks. */ for (b = 0; b < nr_blocks; b++) { - index = b / AFS_DIR_BLOCKS_PER_PAGE; - folio = afs_dir_get_folio(vnode, index); - if (!folio) + block = afs_dir_get_block(&iter, b); + if (!block) goto error; - block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_pos(folio)); - /* Abandon the edit if we got a callback break. */ if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) - goto invalidated; + goto already_invalidated; slot = afs_dir_scan_block(block, &dotdot_name, b); if (slot >= 0) goto found_dirent; kunmap_local(block); - folio_unlock(folio); - folio_put(folio); } /* Didn't find the dirent to clobber. Download the directory again. 
*/ trace_afs_edit_dir(vnode, why, afs_edit_dir_update_nodd, 0, 0, 0, 0, ".."); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); + afs_invalidate_dir(vnode, afs_dir_invalid_edit_upd_no_dd); goto out; found_dirent: @@ -556,26 +579,70 @@ found_dirent: ntohl(de->u.vnode), ntohl(de->u.unique), ".."); kunmap_local(block); - folio_unlock(folio); - folio_put(folio); + netfs_single_mark_inode_dirty(&vnode->netfs.inode); inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version); out: _leave(""); return; -invalidated: +already_invalidated: kunmap_local(block); - folio_unlock(folio); - folio_put(folio); trace_afs_edit_dir(vnode, why, afs_edit_dir_update_inval, 0, 0, 0, 0, ".."); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out; error: trace_afs_edit_dir(vnode, why, afs_edit_dir_update_error, 0, 0, 0, 0, ".."); - clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); goto out; } + +/* + * Initialise a new directory. We need to fill in the "." and ".." entries. + */ +void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_dvnode) +{ + union afs_xdr_dir_block *meta; + struct afs_dir_iter iter = { .dvnode = dvnode }; + union afs_xdr_dirent *de; + unsigned int slot = AFS_DIR_RESV_BLOCKS0; + loff_t i_size; + + i_size = i_size_read(&dvnode->netfs.inode); + if (i_size != AFS_DIR_BLOCK_SIZE) { + afs_invalidate_dir(dvnode, afs_dir_invalid_edit_add_bad_size); + return; + } + + meta = afs_dir_get_block(&iter, 0); + if (!meta) + return; + + afs_edit_init_block(meta, meta, 0); + + de = &meta->dirents[slot]; + de->u.valid = 1; + de->u.vnode = htonl(dvnode->fid.vnode); + de->u.unique = htonl(dvnode->fid.unique); + memcpy(de->u.name, ".", 2); + trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot, + dvnode->fid.vnode, dvnode->fid.unique, "."); + slot++; + + de = &meta->dirents[slot]; + de->u.valid = 1; + de->u.vnode = htonl(parent_dvnode->fid.vnode); + de->u.unique = htonl(parent_dvnode->fid.unique); + memcpy(de->u.name, "..", 3); + trace_afs_edit_dir(dvnode, afs_edit_dir_for_mkdir, afs_edit_dir_mkdir, 0, slot, + parent_dvnode->fid.vnode, parent_dvnode->fid.unique, ".."); + + afs_set_contig_bits(meta, AFS_DIR_RESV_BLOCKS0, 2); + meta->meta.alloc_ctrs[0] -= 2; + kunmap_local(meta); + + netfs_single_mark_inode_dirty(&dvnode->netfs.inode); + set_bit(AFS_VNODE_DIR_VALID, &dvnode->flags); + set_bit(AFS_VNODE_DIR_READ, &dvnode->flags); +} diff --git a/fs/afs/dir_search.c b/fs/afs/dir_search.c new file mode 100644 index 000000000000..b25bd892db4d --- /dev/null +++ b/fs/afs/dir_search.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Search a directory's hash table. + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * https://tools.ietf.org/html/draft-keiser-afs3-directory-object-00 + */ + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/namei.h> +#include <linux/iversion.h> +#include "internal.h" +#include "afs_fs.h" +#include "xdr_fs.h" + +/* + * Calculate the name hash. + */ +unsigned int afs_dir_hash_name(const struct qstr *name) +{ + const unsigned char *p = name->name; + unsigned int hash = 0, i; + int bucket; + + for (i = 0; i < name->len; i++) + hash = (hash * 173) + p[i]; + bucket = hash & (AFS_DIR_HASHTBL_SIZE - 1); + if (hash > INT_MAX) { + bucket = AFS_DIR_HASHTBL_SIZE - bucket; + bucket &= (AFS_DIR_HASHTBL_SIZE - 1); + } + return bucket; +} + +/* + * Reset a directory iterator. 
+ */ +static bool afs_dir_reset_iter(struct afs_dir_iter *iter) +{ + unsigned long long i_size = i_size_read(&iter->dvnode->netfs.inode); + unsigned int nblocks; + + /* Work out the maximum number of steps we can take. */ + nblocks = umin(i_size / AFS_DIR_BLOCK_SIZE, AFS_DIR_MAX_BLOCKS); + if (!nblocks) + return false; + iter->loop_check = nblocks * (AFS_DIR_SLOTS_PER_BLOCK - AFS_DIR_RESV_BLOCKS); + iter->prev_entry = 0; /* Hash head is previous */ + return true; +} + +/* + * Initialise a directory iterator for looking up a name. + */ +bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name) +{ + iter->nr_slots = afs_dir_calc_slots(name->len); + iter->bucket = afs_dir_hash_name(name); + return afs_dir_reset_iter(iter); +} + +/* + * Get a specific block. + */ +union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block) +{ + struct folio_queue *fq = iter->fq; + struct afs_vnode *dvnode = iter->dvnode; + struct folio *folio; + size_t blpos = block * AFS_DIR_BLOCK_SIZE; + size_t blend = (block + 1) * AFS_DIR_BLOCK_SIZE, fpos = iter->fpos; + int slot = iter->fq_slot; + + _enter("%zx,%d", block, slot); + + if (iter->block) { + kunmap_local(iter->block); + iter->block = NULL; + } + + if (dvnode->directory_size < blend) + goto fail; + + if (!fq || blpos < fpos) { + fq = dvnode->directory; + slot = 0; + fpos = 0; + } + + /* Search the folio queue for the folio containing the block... */ + for (; fq; fq = fq->next) { + for (; slot < folioq_count(fq); slot++) { + size_t fsize = folioq_folio_size(fq, slot); + + if (blend <= fpos + fsize) { + /* ... and then return the mapped block. */ + folio = folioq_folio(fq, slot); + if (WARN_ON_ONCE(folio_pos(folio) != fpos)) + goto fail; + iter->fq = fq; + iter->fq_slot = slot; + iter->fpos = fpos; + iter->block = kmap_local_folio(folio, blpos - fpos); + return iter->block; + } + fpos += fsize; + } + slot = 0; + } + +fail: + iter->fq = NULL; + iter->fq_slot = 0; + afs_invalidate_dir(dvnode, afs_dir_invalid_edit_get_block); + return NULL; +} + +/* + * Search through a directory bucket. + */ +int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name, + struct afs_fid *_fid) +{ + const union afs_xdr_dir_block *meta; + unsigned int entry; + int ret = -ESTALE; + + meta = afs_dir_find_block(iter, 0); + if (!meta) + return -ESTALE; + + entry = ntohs(meta->meta.hashtable[iter->bucket & (AFS_DIR_HASHTBL_SIZE - 1)]); + _enter("%x,%x", iter->bucket, entry); + + while (entry) { + const union afs_xdr_dir_block *block; + const union afs_xdr_dirent *dire; + unsigned int blnum = entry / AFS_DIR_SLOTS_PER_BLOCK; + unsigned int slot = entry % AFS_DIR_SLOTS_PER_BLOCK; + unsigned int resv = (blnum == 0 ? 
AFS_DIR_RESV_BLOCKS0 : AFS_DIR_RESV_BLOCKS); + + _debug("search %x", entry); + + if (slot < resv) { + kdebug("slot out of range h=%x rs=%2x sl=%2x-%2x", + iter->bucket, resv, slot, slot + iter->nr_slots - 1); + goto bad; + } + + block = afs_dir_find_block(iter, blnum); + if (!block) + goto bad; + dire = &block->dirents[slot]; + + if (slot + iter->nr_slots <= AFS_DIR_SLOTS_PER_BLOCK && + memcmp(dire->u.name, name->name, name->len) == 0 && + dire->u.name[name->len] == '\0') { + _fid->vnode = ntohl(dire->u.vnode); + _fid->unique = ntohl(dire->u.unique); + ret = entry; + goto found; + } + + iter->prev_entry = entry; + entry = ntohs(dire->u.hash_next); + if (!--iter->loop_check) { + kdebug("dir chain loop h=%x", iter->bucket); + goto bad; + } + } + + ret = -ENOENT; +found: + if (iter->block) { + kunmap_local(iter->block); + iter->block = NULL; + } + +bad: + if (ret == -ESTALE) + afs_invalidate_dir(iter->dvnode, afs_dir_invalid_iter_stale); + _leave(" = %d", ret); + return ret; +} + +/* + * Search the appropriate hash chain in the contents of an AFS directory. + */ +int afs_dir_search(struct afs_vnode *dvnode, struct qstr *name, + struct afs_fid *_fid, afs_dataversion_t *_dir_version) +{ + struct afs_dir_iter iter = { .dvnode = dvnode, }; + int ret, retry_limit = 3; + + _enter("{%lu},,,", dvnode->netfs.inode.i_ino); + + if (!afs_dir_init_iter(&iter, name)) + return -ENOENT; + do { + if (--retry_limit < 0) { + pr_warn("afs_read_dir(): Too many retries\n"); + ret = -ESTALE; + break; + } + ret = afs_read_dir(dvnode, NULL); + if (ret < 0) { + if (ret != -ESTALE) + break; + if (test_bit(AFS_VNODE_DELETED, &dvnode->flags)) { + ret = -ESTALE; + break; + } + continue; + } + *_dir_version = inode_peek_iversion_raw(&dvnode->netfs.inode); + + ret = afs_dir_search_bucket(&iter, name, _fid); + up_read(&dvnode->validate_lock); + if (ret == -ESTALE) + afs_dir_reset_iter(&iter); + } while (ret == -ESTALE); + + _leave(" = %d", ret); + return ret; +} diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index c4d2711e20ad..d8bf52f77d93 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -186,50 +186,6 @@ out: } /* - * Look up @cell in a dynroot directory. This is a substitution for the - * local cell name for the net namespace. - */ -static struct dentry *afs_lookup_atcell(struct dentry *dentry) -{ - struct afs_cell *cell; - struct afs_net *net = afs_d2net(dentry); - struct dentry *ret; - char *name; - int len; - - if (!net->ws_cell) - return ERR_PTR(-ENOENT); - - ret = ERR_PTR(-ENOMEM); - name = kmalloc(AFS_MAXCELLNAME + 1, GFP_KERNEL); - if (!name) - goto out_p; - - down_read(&net->cells_lock); - cell = net->ws_cell; - if (cell) { - len = cell->name_len; - memcpy(name, cell->name, len + 1); - } - up_read(&net->cells_lock); - - ret = ERR_PTR(-ENOENT); - if (!cell) - goto out_n; - - ret = lookup_one_len(name, dentry->d_parent, len); - - /* We don't want to d_add() the @cell dentry here as we don't want to - * the cached dentry to hide changes to the local cell name. - */ - -out_n: - kfree(name); -out_p: - return ret; -} - -/* * Look up an entry in a dynroot directory. 
*/ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentry, @@ -247,10 +203,6 @@ static struct dentry *afs_dynroot_lookup(struct inode *dir, struct dentry *dentr return ERR_PTR(-ENAMETOOLONG); } - if (dentry->d_name.len == 5 && - memcmp(dentry->d_name.name, "@cell", 5) == 0) - return afs_lookup_atcell(dentry); - return d_splice_alias(afs_try_auto_mntpt(dentry, dir), dentry); } @@ -271,7 +223,8 @@ const struct dentry_operations afs_dynroot_dentry_operations = { int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell) { struct super_block *sb = net->dynroot_sb; - struct dentry *root, *subdir; + struct dentry *root, *subdir, *dsubdir; + char *dotname = cell->name - 1; int ret; if (!sb || atomic_read(&sb->s_active) == 0) @@ -286,14 +239,43 @@ int afs_dynroot_mkdir(struct afs_net *net, struct afs_cell *cell) goto unlock; } - /* Note that we're retaining an extra ref on the dentry */ + dsubdir = lookup_one_len(dotname, root, cell->name_len + 1); + if (IS_ERR(dsubdir)) { + ret = PTR_ERR(dsubdir); + dput(subdir); + goto unlock; + } + + /* Note that we're retaining extra refs on the dentries. */ subdir->d_fsdata = (void *)1UL; + dsubdir->d_fsdata = (void *)1UL; ret = 0; unlock: inode_unlock(root->d_inode); return ret; } +static void afs_dynroot_rm_one_dir(struct dentry *root, const char *name, size_t name_len) +{ + struct dentry *subdir; + + /* Don't want to trigger a lookup call, which will re-add the cell */ + subdir = try_lookup_one_len(name, root, name_len); + if (IS_ERR_OR_NULL(subdir)) { + _debug("lookup %ld", PTR_ERR(subdir)); + return; + } + + _debug("rmdir %pd %u", subdir, d_count(subdir)); + + if (subdir->d_fsdata) { + _debug("unpin %u", d_count(subdir)); + subdir->d_fsdata = NULL; + dput(subdir); + } + dput(subdir); +} + /* * Remove a manually added cell mount directory. * - The caller must hold net->proc_cells_lock @@ -301,32 +283,141 @@ unlock: void afs_dynroot_rmdir(struct afs_net *net, struct afs_cell *cell) { struct super_block *sb = net->dynroot_sb; - struct dentry *root, *subdir; + char *dotname = cell->name - 1; if (!sb || atomic_read(&sb->s_active) == 0) return; - root = sb->s_root; - inode_lock(root->d_inode); + inode_lock(sb->s_root->d_inode); + afs_dynroot_rm_one_dir(sb->s_root, cell->name, cell->name_len); + afs_dynroot_rm_one_dir(sb->s_root, dotname, cell->name_len + 1); + inode_unlock(sb->s_root->d_inode); + _leave(""); +} - /* Don't want to trigger a lookup call, which will re-add the cell */ - subdir = try_lookup_one_len(cell->name, root, cell->name_len); - if (IS_ERR_OR_NULL(subdir)) { - _debug("lookup %ld", PTR_ERR(subdir)); - goto no_dentry; +static void afs_atcell_delayed_put_cell(void *arg) +{ + struct afs_cell *cell = arg; + + afs_put_cell(cell, afs_cell_trace_put_atcell); +} + +/* + * Read @cell or .@cell symlinks. 
+ */ +static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *done) +{ + struct afs_vnode *vnode = AFS_FS_I(inode); + struct afs_cell *cell; + struct afs_net *net = afs_i2net(inode); + const char *name; + bool dotted = vnode->fid.vnode == 3; + + if (!net->ws_cell) + return ERR_PTR(-ENOENT); + + down_read(&net->cells_lock); + + cell = net->ws_cell; + if (dotted) + name = cell->name - 1; + else + name = cell->name; + afs_get_cell(cell, afs_cell_trace_get_atcell); + set_delayed_call(done, afs_atcell_delayed_put_cell, cell); + + up_read(&net->cells_lock); + return name; +} + +static const struct inode_operations afs_atcell_inode_operations = { + .get_link = afs_atcell_get_link, +}; + +/* + * Look up @cell or .@cell in a dynroot directory. This is a substitution for + * the local cell name for the net namespace. + */ +static struct dentry *afs_dynroot_create_symlink(struct dentry *root, const char *name) +{ + struct afs_vnode *vnode; + struct afs_fid fid = { .vnode = 2, .unique = 1, }; + struct dentry *dentry; + struct inode *inode; + + if (name[0] == '.') + fid.vnode = 3; + + dentry = d_alloc_name(root, name); + if (!dentry) + return ERR_PTR(-ENOMEM); + + inode = iget5_locked(dentry->d_sb, fid.vnode, + afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid); + if (!inode) { + dput(dentry); + return ERR_PTR(-ENOMEM); } - _debug("rmdir %pd %u", subdir, d_count(subdir)); + vnode = AFS_FS_I(inode); - if (subdir->d_fsdata) { - _debug("unpin %u", d_count(subdir)); - subdir->d_fsdata = NULL; - dput(subdir); + /* there shouldn't be an existing inode */ + if (WARN_ON_ONCE(!(inode->i_state & I_NEW))) { + iput(inode); + dput(dentry); + return ERR_PTR(-EIO); } - dput(subdir); -no_dentry: + + netfs_inode_init(&vnode->netfs, NULL, false); + simple_inode_init_ts(inode); + set_nlink(inode, 1); + inode->i_size = 0; + inode->i_mode = S_IFLNK | 0555; + inode->i_op = &afs_atcell_inode_operations; + inode->i_uid = GLOBAL_ROOT_UID; + inode->i_gid = GLOBAL_ROOT_GID; + inode->i_blocks = 0; + inode->i_generation = 0; + inode->i_flags |= S_NOATIME; + + unlock_new_inode(inode); + d_splice_alias(inode, dentry); + return dentry; +} + +/* + * Create @cell and .@cell symlinks. + */ +static int afs_dynroot_symlink(struct afs_net *net) +{ + struct super_block *sb = net->dynroot_sb; + struct dentry *root, *symlink, *dsymlink; + int ret; + + /* Let the ->lookup op do the creation */ + root = sb->s_root; + inode_lock(root->d_inode); + symlink = afs_dynroot_create_symlink(root, "@cell"); + if (IS_ERR(symlink)) { + ret = PTR_ERR(symlink); + goto unlock; + } + + dsymlink = afs_dynroot_create_symlink(root, ".@cell"); + if (IS_ERR(dsymlink)) { + ret = PTR_ERR(dsymlink); + dput(symlink); + goto unlock; + } + + /* Note that we're retaining extra refs on the dentries. 
*/ + symlink->d_fsdata = (void *)1UL; + dsymlink->d_fsdata = (void *)1UL; + ret = 0; +unlock: inode_unlock(root->d_inode); - _leave(""); + return ret; } /* @@ -341,6 +432,10 @@ int afs_dynroot_populate(struct super_block *sb) mutex_lock(&net->proc_cells_lock); net->dynroot_sb = sb; + ret = afs_dynroot_symlink(net); + if (ret < 0) + goto error; + hlist_for_each_entry(cell, &net->proc_cells, proc_link) { ret = afs_dynroot_mkdir(net, cell); if (ret < 0) diff --git a/fs/afs/file.c b/fs/afs/file.c index 6762eff97517..fc15497608c6 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -20,7 +20,6 @@ #include "internal.h" static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); -static int afs_symlink_read_folio(struct file *file, struct folio *folio); static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter); static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos, @@ -61,13 +60,6 @@ const struct address_space_operations afs_file_aops = { .writepages = afs_writepages, }; -const struct address_space_operations afs_symlink_aops = { - .read_folio = afs_symlink_read_folio, - .release_folio = netfs_release_folio, - .invalidate_folio = netfs_invalidate_folio, - .migrate_folio = filemap_migrate_folio, -}; - static const struct vm_operations_struct afs_vm_ops = { .open = afs_vm_open, .close = afs_vm_close, @@ -208,49 +200,12 @@ int afs_release(struct inode *inode, struct file *file) return ret; } -/* - * Allocate a new read record. - */ -struct afs_read *afs_alloc_read(gfp_t gfp) -{ - struct afs_read *req; - - req = kzalloc(sizeof(struct afs_read), gfp); - if (req) - refcount_set(&req->usage, 1); - - return req; -} - -/* - * Dispose of a ref to a read record. - */ -void afs_put_read(struct afs_read *req) -{ - if (refcount_dec_and_test(&req->usage)) { - if (req->cleanup) - req->cleanup(req); - key_put(req->key); - kfree(req); - } -} - static void afs_fetch_data_notify(struct afs_operation *op) { - struct afs_read *req = op->fetch.req; - struct netfs_io_subrequest *subreq = req->subreq; - int error = afs_op_error(op); - - req->error = error; - if (subreq) { - subreq->rreq->i_size = req->file_size; - if (req->pos + req->actual_len >= req->file_size) - __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags); - netfs_read_subreq_terminated(subreq, error, false); - req->subreq = NULL; - } else if (req->done) { - req->done(req); - } + struct netfs_io_subrequest *subreq = op->fetch.subreq; + + subreq->error = afs_op_error(op); + netfs_read_subreq_terminated(subreq); } static void afs_fetch_data_success(struct afs_operation *op) @@ -260,7 +215,7 @@ static void afs_fetch_data_success(struct afs_operation *op) _enter("op=%08x", op->debug_id); afs_vnode_commit_status(op, &op->file[0]); afs_stat_v(vnode, n_fetches); - atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes); + atomic_long_add(op->fetch.subreq->transferred, &op->net->n_fetch_bytes); afs_fetch_data_notify(op); } @@ -270,107 +225,188 @@ static void afs_fetch_data_aborted(struct afs_operation *op) afs_fetch_data_notify(op); } -static void afs_fetch_data_put(struct afs_operation *op) -{ - op->fetch.req->error = afs_op_error(op); - afs_put_read(op->fetch.req); -} - -static const struct afs_operation_ops afs_fetch_data_operation = { +const struct afs_operation_ops afs_fetch_data_operation = { .issue_afs_rpc = afs_fs_fetch_data, .issue_yfs_rpc = yfs_fs_fetch_data, .success = afs_fetch_data_success, .aborted = afs_fetch_data_aborted, .failed = afs_fetch_data_notify, - .put = afs_fetch_data_put, }; +static void 
afs_issue_read_call(struct afs_operation *op) +{ + op->call_responded = false; + op->call_error = 0; + op->call_abort_code = 0; + if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags)) + yfs_fs_fetch_data(op); + else + afs_fs_fetch_data(op); +} + +static void afs_end_read(struct afs_operation *op) +{ + if (op->call_responded && op->server) + set_bit(AFS_SERVER_FL_RESPONDING, &op->server->flags); + + if (!afs_op_error(op)) + afs_fetch_data_success(op); + else if (op->cumul_error.aborted) + afs_fetch_data_aborted(op); + else + afs_fetch_data_notify(op); + + afs_end_vnode_operation(op); + afs_put_operation(op); +} + +/* + * Perform I/O processing on an asynchronous call. The work item carries a ref + * to the call struct that we either need to release or to pass on. + */ +static void afs_read_receive(struct afs_call *call) +{ + struct afs_operation *op = call->op; + enum afs_call_state state; + + _enter(""); + + state = READ_ONCE(call->state); + if (state == AFS_CALL_COMPLETE) + return; + trace_afs_read_recv(op, call); + + while (state < AFS_CALL_COMPLETE && READ_ONCE(call->need_attention)) { + WRITE_ONCE(call->need_attention, false); + afs_deliver_to_call(call); + state = READ_ONCE(call->state); + } + + if (state < AFS_CALL_COMPLETE) { + netfs_read_subreq_progress(op->fetch.subreq); + if (rxrpc_kernel_check_life(call->net->socket, call->rxcall)) + return; + /* rxrpc terminated the call. */ + afs_set_call_complete(call, call->error, call->abort_code); + } + + op->call_abort_code = call->abort_code; + op->call_error = call->error; + op->call_responded = call->responded; + op->call = NULL; + call->op = NULL; + afs_put_call(call); + + /* If the call failed, then we need to crank the server rotation + * handle and try the next. + */ + if (afs_select_fileserver(op)) { + afs_issue_read_call(op); + return; + } + + afs_end_read(op); +} + +void afs_fetch_data_async_rx(struct work_struct *work) +{ + struct afs_call *call = container_of(work, struct afs_call, async_work); + + afs_read_receive(call); + afs_put_call(call); +} + +void afs_fetch_data_immediate_cancel(struct afs_call *call) +{ + if (call->async) { + afs_get_call(call, afs_call_trace_wake); + if (!queue_work(afs_async_calls, &call->async_work)) + afs_deferred_put_call(call); + flush_work(&call->async_work); + } +} + /* * Fetch file data from the volume. 
*/ -int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) +static void afs_issue_read(struct netfs_io_subrequest *subreq) { struct afs_operation *op; + struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); + struct key *key = subreq->rreq->netfs_priv; _enter("%s{%llx:%llu.%u},%x,,,", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, - key_serial(req->key)); + key_serial(key)); - op = afs_alloc_operation(req->key, vnode->volume); + op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) { - if (req->subreq) - netfs_read_subreq_terminated(req->subreq, PTR_ERR(op), false); - return PTR_ERR(op); + subreq->error = PTR_ERR(op); + netfs_read_subreq_terminated(subreq); + return; } afs_op_set_vnode(op, 0, vnode); - op->fetch.req = afs_get_read(req); + op->fetch.subreq = subreq; op->ops = &afs_fetch_data_operation; - return afs_do_sync_operation(op); -} - -static void afs_read_worker(struct work_struct *work) -{ - struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work); - struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); - struct afs_read *fsreq; - - fsreq = afs_alloc_read(GFP_NOFS); - if (!fsreq) - return netfs_read_subreq_terminated(subreq, -ENOMEM, false); - - fsreq->subreq = subreq; - fsreq->pos = subreq->start + subreq->transferred; - fsreq->len = subreq->len - subreq->transferred; - fsreq->key = key_get(subreq->rreq->netfs_priv); - fsreq->vnode = vnode; - fsreq->iter = &subreq->io_iter; trace_netfs_sreq(subreq, netfs_sreq_trace_submit); - afs_fetch_data(fsreq->vnode, fsreq); - afs_put_read(fsreq); -} - -static void afs_issue_read(struct netfs_io_subrequest *subreq) -{ - INIT_WORK(&subreq->work, afs_read_worker); - queue_work(system_long_wq, &subreq->work); -} -static int afs_symlink_read_folio(struct file *file, struct folio *folio) -{ - struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host); - struct afs_read *fsreq; - int ret; + if (subreq->rreq->origin == NETFS_READAHEAD || + subreq->rreq->iocb) { + op->flags |= AFS_OPERATION_ASYNC; - fsreq = afs_alloc_read(GFP_NOFS); - if (!fsreq) - return -ENOMEM; + if (!afs_begin_vnode_operation(op)) { + subreq->error = afs_put_operation(op); + netfs_read_subreq_terminated(subreq); + return; + } - fsreq->pos = folio_pos(folio); - fsreq->len = folio_size(folio); - fsreq->vnode = vnode; - fsreq->iter = &fsreq->def_iter; - iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages, - fsreq->pos, fsreq->len); + if (!afs_select_fileserver(op)) { + afs_end_read(op); + return; + } - ret = afs_fetch_data(fsreq->vnode, fsreq); - if (ret == 0) - folio_mark_uptodate(folio); - folio_unlock(folio); - return ret; + afs_issue_read_call(op); + } else { + afs_do_sync_operation(op); + } } static int afs_init_request(struct netfs_io_request *rreq, struct file *file) { + struct afs_vnode *vnode = AFS_FS_I(rreq->inode); + if (file) rreq->netfs_priv = key_get(afs_file_key(file)); rreq->rsize = 256 * 1024; rreq->wsize = 256 * 1024 * 1024; + + switch (rreq->origin) { + case NETFS_READ_SINGLE: + if (!file) { + struct key *key = afs_request_key(vnode->volume->cell); + + if (IS_ERR(key)) + return PTR_ERR(key); + rreq->netfs_priv = key; + } + break; + case NETFS_WRITEBACK: + case NETFS_WRITETHROUGH: + case NETFS_UNBUFFERED_WRITE: + case NETFS_DIO_WRITE: + if (S_ISREG(rreq->inode->i_mode)) + rreq->io_streams[0].avail = true; + break; + case NETFS_WRITEBACK_SINGLE: + default: + break; + } return 0; } diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index 
428721bbe4f6..8418813ee043 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -49,6 +49,105 @@ struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *vo return op; } +struct afs_io_locker { + struct list_head link; + struct task_struct *task; + unsigned long have_lock; +}; + +/* + * Unlock the I/O lock on a vnode. + */ +static void afs_unlock_for_io(struct afs_vnode *vnode) +{ + struct afs_io_locker *locker; + + spin_lock(&vnode->lock); + locker = list_first_entry_or_null(&vnode->io_lock_waiters, + struct afs_io_locker, link); + if (locker) { + list_del(&locker->link); + smp_store_release(&locker->have_lock, 1); /* The unlock barrier. */ + smp_mb__after_atomic(); /* Store have_lock before task state */ + wake_up_process(locker->task); + } else { + clear_bit(AFS_VNODE_IO_LOCK, &vnode->flags); + } + spin_unlock(&vnode->lock); +} + +/* + * Lock the I/O lock on a vnode uninterruptibly. We can't use an ordinary + * mutex as lockdep will complain if we unlock it in the wrong thread. + */ +static void afs_lock_for_io(struct afs_vnode *vnode) +{ + struct afs_io_locker myself = { .task = current, }; + + spin_lock(&vnode->lock); + + if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) { + spin_unlock(&vnode->lock); + return; + } + + list_add_tail(&myself.link, &vnode->io_lock_waiters); + spin_unlock(&vnode->lock); + + for (;;) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (smp_load_acquire(&myself.have_lock)) /* The lock barrier */ + break; + schedule(); + } + __set_current_state(TASK_RUNNING); +} + +/* + * Lock the I/O lock on a vnode interruptibly. We can't use an ordinary mutex + * as lockdep will complain if we unlock it in the wrong thread. + */ +static int afs_lock_for_io_interruptible(struct afs_vnode *vnode) +{ + struct afs_io_locker myself = { .task = current, }; + int ret = 0; + + spin_lock(&vnode->lock); + + if (!test_and_set_bit(AFS_VNODE_IO_LOCK, &vnode->flags)) { + spin_unlock(&vnode->lock); + return 0; + } + + list_add_tail(&myself.link, &vnode->io_lock_waiters); + spin_unlock(&vnode->lock); + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + if (smp_load_acquire(&myself.have_lock) || /* The lock barrier */ + signal_pending(current)) + break; + schedule(); + } + __set_current_state(TASK_RUNNING); + + /* If we got a signal, try to transfer the lock onto the next + * waiter. + */ + if (unlikely(signal_pending(current))) { + spin_lock(&vnode->lock); + if (myself.have_lock) { + spin_unlock(&vnode->lock); + afs_unlock_for_io(vnode); + } else { + list_del(&myself.link); + spin_unlock(&vnode->lock); + } + ret = -ERESTARTSYS; + } + return ret; +} + /* * Lock the vnode(s) being operated upon. 
*/ @@ -60,7 +159,7 @@ static bool afs_get_io_locks(struct afs_operation *op) _enter(""); if (op->flags & AFS_OPERATION_UNINTR) { - mutex_lock(&vnode->io_lock); + afs_lock_for_io(vnode); op->flags |= AFS_OPERATION_LOCK_0; _leave(" = t [1]"); return true; @@ -72,7 +171,7 @@ static bool afs_get_io_locks(struct afs_operation *op) if (vnode2 > vnode) swap(vnode, vnode2); - if (mutex_lock_interruptible(&vnode->io_lock) < 0) { + if (afs_lock_for_io_interruptible(vnode) < 0) { afs_op_set_error(op, -ERESTARTSYS); op->flags |= AFS_OPERATION_STOP; _leave(" = f [I 0]"); @@ -81,10 +180,10 @@ static bool afs_get_io_locks(struct afs_operation *op) op->flags |= AFS_OPERATION_LOCK_0; if (vnode2) { - if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) { + if (afs_lock_for_io_interruptible(vnode2) < 0) { afs_op_set_error(op, -ERESTARTSYS); op->flags |= AFS_OPERATION_STOP; - mutex_unlock(&vnode->io_lock); + afs_unlock_for_io(vnode); op->flags &= ~AFS_OPERATION_LOCK_0; _leave(" = f [I 1]"); return false; @@ -104,9 +203,9 @@ static void afs_drop_io_locks(struct afs_operation *op) _enter(""); if (op->flags & AFS_OPERATION_LOCK_1) - mutex_unlock(&vnode2->io_lock); + afs_unlock_for_io(vnode2); if (op->flags & AFS_OPERATION_LOCK_0) - mutex_unlock(&vnode->io_lock); + afs_unlock_for_io(vnode); } static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp, @@ -157,7 +256,7 @@ bool afs_begin_vnode_operation(struct afs_operation *op) /* * Tidy up a filesystem cursor and unlock the vnode. */ -static void afs_end_vnode_operation(struct afs_operation *op) +void afs_end_vnode_operation(struct afs_operation *op) { _enter(""); diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 098fa034a1cc..1d9ecd5418d8 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -301,19 +301,19 @@ void afs_fs_fetch_status(struct afs_operation *op) static int afs_deliver_fs_fetch_data(struct afs_call *call) { struct afs_operation *op = call->op; + struct netfs_io_subrequest *subreq = op->fetch.subreq; struct afs_vnode_param *vp = &op->file[0]; - struct afs_read *req = op->fetch.req; const __be32 *bp; size_t count_before; int ret; _enter("{%u,%zu,%zu/%llu}", call->unmarshall, call->iov_len, iov_iter_count(call->iter), - req->actual_len); + call->remaining); switch (call->unmarshall) { case 0: - req->actual_len = 0; + call->remaining = 0; call->unmarshall++; if (call->operation_ID == FSFETCHDATA64) { afs_extract_to_tmp64(call); @@ -323,8 +323,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) } fallthrough; - /* Extract the returned data length into - * ->actual_len. This may indicate more or less data than was + /* Extract the returned data length into ->remaining. + * This may indicate more or less data than was * requested will be returned. 
*/ case 1: @@ -333,42 +333,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) if (ret < 0) return ret; - req->actual_len = be64_to_cpu(call->tmp64); - _debug("DATA length: %llu", req->actual_len); + call->remaining = be64_to_cpu(call->tmp64); + _debug("DATA length: %llu", call->remaining); - if (req->actual_len == 0) + if (call->remaining == 0) goto no_more_data; - call->iter = req->iter; - call->iov_len = min(req->actual_len, req->len); + call->iter = &subreq->io_iter; + call->iov_len = umin(call->remaining, subreq->len - subreq->transferred); call->unmarshall++; fallthrough; /* extract the returned data */ case 2: count_before = call->iov_len; - _debug("extract data %zu/%llu", count_before, req->actual_len); + _debug("extract data %zu/%llu", count_before, call->remaining); ret = afs_extract_data(call, true); - if (req->subreq) { - req->subreq->transferred += count_before - call->iov_len; - netfs_read_subreq_progress(req->subreq, false); - } + subreq->transferred += count_before - call->iov_len; + call->remaining -= count_before - call->iov_len; if (ret < 0) return ret; call->iter = &call->def_iter; - if (req->actual_len <= req->len) + if (call->remaining) goto no_more_data; /* Discard any excess data the server gave us */ - afs_extract_discard(call, req->actual_len - req->len); + afs_extract_discard(call, call->remaining); call->unmarshall = 3; fallthrough; case 3: _debug("extract discard %zu/%llu", - iov_iter_count(call->iter), req->actual_len - req->len); + iov_iter_count(call->iter), call->remaining); ret = afs_extract_data(call, true); if (ret < 0) @@ -390,8 +388,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) xdr_decode_AFSCallBack(&bp, call, &vp->scb); xdr_decode_AFSVolSync(&bp, &op->volsync); - req->data_version = vp->scb.status.data_version; - req->file_size = vp->scb.status.size; + if (subreq->start + subreq->transferred >= vp->scb.status.size) + __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags); call->unmarshall++; fallthrough; @@ -410,14 +408,18 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) static const struct afs_call_type afs_RXFSFetchData = { .name = "FS.FetchData", .op = afs_FS_FetchData, + .async_rx = afs_fetch_data_async_rx, .deliver = afs_deliver_fs_fetch_data, + .immediate_cancel = afs_fetch_data_immediate_cancel, .destructor = afs_flat_call_destructor, }; static const struct afs_call_type afs_RXFSFetchData64 = { .name = "FS.FetchData64", .op = afs_FS_FetchData64, + .async_rx = afs_fetch_data_async_rx, .deliver = afs_deliver_fs_fetch_data, + .immediate_cancel = afs_fetch_data_immediate_cancel, .destructor = afs_flat_call_destructor, }; @@ -426,8 +428,8 @@ static const struct afs_call_type afs_RXFSFetchData64 = { */ static void afs_fs_fetch_data64(struct afs_operation *op) { + struct netfs_io_subrequest *subreq = op->fetch.subreq; struct afs_vnode_param *vp = &op->file[0]; - struct afs_read *req = op->fetch.req; struct afs_call *call; __be32 *bp; @@ -437,16 +439,19 @@ static void afs_fs_fetch_data64(struct afs_operation *op) if (!call) return afs_op_nomem(op); + if (op->flags & AFS_OPERATION_ASYNC) + call->async = true; + /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA64); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); - bp[4] = htonl(upper_32_bits(req->pos)); - bp[5] = htonl(lower_32_bits(req->pos)); + bp[4] = htonl(upper_32_bits(subreq->start + subreq->transferred)); + bp[5] = htonl(lower_32_bits(subreq->start + subreq->transferred)); bp[6] = 0; - bp[7] = 
htonl(lower_32_bits(req->len)); + bp[7] = htonl(lower_32_bits(subreq->len - subreq->transferred)); call->fid = vp->fid; trace_afs_make_fs_call(call, &vp->fid); @@ -458,9 +463,9 @@ static void afs_fs_fetch_data64(struct afs_operation *op) */ void afs_fs_fetch_data(struct afs_operation *op) { + struct netfs_io_subrequest *subreq = op->fetch.subreq; struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - struct afs_read *req = op->fetch.req; __be32 *bp; if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags)) @@ -472,16 +477,14 @@ void afs_fs_fetch_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); - req->call_debug_id = call->debug_id; - /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA); bp[1] = htonl(vp->fid.vid); bp[2] = htonl(vp->fid.vnode); bp[3] = htonl(vp->fid.unique); - bp[4] = htonl(lower_32_bits(req->pos)); - bp[5] = htonl(lower_32_bits(req->len)); + bp[4] = htonl(lower_32_bits(subreq->start + subreq->transferred)); + bp[5] = htonl(lower_32_bits(subreq->len + subreq->transferred)); call->fid = vp->fid; trace_afs_make_fs_call(call, &vp->fid); @@ -1733,6 +1736,7 @@ static const struct afs_call_type afs_RXFSGetCapabilities = { .op = afs_FS_GetCapabilities, .deliver = afs_deliver_fs_get_capabilities, .done = afs_fileserver_probe_result, + .immediate_cancel = afs_fileserver_probe_result, .destructor = afs_fs_get_capabilities_destructor, }; diff --git a/fs/afs/inode.c b/fs/afs/inode.c index a95e77670b49..e9538e91f848 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -25,8 +25,94 @@ #include "internal.h" #include "afs_fs.h" +void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op) +{ + size_t size = strlen(op->create.symlink) + 1; + size_t dsize = 0; + char *p; + + if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size, + mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0) + return; + + vnode->directory_size = dsize; + p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0); + memcpy(p, op->create.symlink, size); + kunmap_local(p); + set_bit(AFS_VNODE_DIR_READ, &vnode->flags); + netfs_single_mark_inode_dirty(&vnode->netfs.inode); +} + +static void afs_put_link(void *arg) +{ + struct folio *folio = virt_to_folio(arg); + + kunmap_local(arg); + folio_put(folio); +} + +const char *afs_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + struct afs_vnode *vnode = AFS_FS_I(inode); + struct folio *folio; + char *content; + ssize_t ret; + + if (!dentry) { + /* RCU pathwalk. 
*/ + if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode)) + return ERR_PTR(-ECHILD); + goto good; + } + + if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags)) + goto fetch; + + ret = afs_validate(vnode, NULL); + if (ret < 0) + return ERR_PTR(ret); + + if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && + test_bit(AFS_VNODE_DIR_READ, &vnode->flags)) + goto good; + +fetch: + ret = afs_read_single(vnode, NULL); + if (ret < 0) + return ERR_PTR(ret); + set_bit(AFS_VNODE_DIR_READ, &vnode->flags); + +good: + folio = folioq_folio(vnode->directory, 0); + folio_get(folio); + content = kmap_local_folio(folio, 0); + set_delayed_call(callback, afs_put_link, content); + return content; +} + +int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen) +{ + DEFINE_DELAYED_CALL(done); + const char *content; + int len; + + content = afs_get_link(dentry, d_inode(dentry), &done); + if (IS_ERR(content)) { + do_delayed_call(&done); + return PTR_ERR(content); + } + + len = umin(strlen(content), buflen); + if (copy_to_user(buffer, content, len)) + len = -EFAULT; + do_delayed_call(&done); + return len; +} + static const struct inode_operations afs_symlink_inode_operations = { - .get_link = page_get_link, + .get_link = afs_get_link, + .readlink = afs_readlink, }; static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode) @@ -110,7 +196,9 @@ static int afs_inode_init_from_status(struct afs_operation *op, inode->i_op = &afs_dir_inode_operations; inode->i_fop = &afs_dir_file_operations; inode->i_mapping->a_ops = &afs_dir_aops; - mapping_set_large_folios(inode->i_mapping); + __set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &vnode->netfs.flags); + /* Assume locally cached directory data will be valid. */ + __set_bit(AFS_VNODE_DIR_VALID, &vnode->flags); break; case AFS_FTYPE_SYMLINK: /* Symlinks with a mode of 0644 are actually mountpoints. */ @@ -122,13 +210,13 @@ static int afs_inode_init_from_status(struct afs_operation *op, inode->i_mode = S_IFDIR | 0555; inode->i_op = &afs_mntpt_inode_operations; inode->i_fop = &afs_mntpt_file_operations; - inode->i_mapping->a_ops = &afs_symlink_aops; } else { inode->i_mode = S_IFLNK | status->mode; inode->i_op = &afs_symlink_inode_operations; - inode->i_mapping->a_ops = &afs_symlink_aops; } + inode->i_mapping->a_ops = &afs_dir_aops; inode_nohighmem(inode); + mapping_set_release_always(inode->i_mapping); break; default: dump_vnode(vnode, op->file[0].vnode != vnode ? 
op->file[0].vnode : NULL); @@ -140,15 +228,17 @@ static int afs_inode_init_from_status(struct afs_operation *op, afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; + trace_afs_set_dv(vnode, status->data_version); inode_set_iversion_raw(&vnode->netfs.inode, status->data_version); if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ - atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE); + afs_clear_cb_promise(vnode, afs_cb_promise_set_new_symlink); } else { vnode->cb_server = op->server; - atomic64_set(&vnode->cb_expires_at, vp->scb.callback.expires_at); + afs_set_cb_promise(vnode, vp->scb.callback.expires_at, + afs_cb_promise_set_new_inode); } write_sequnlock(&vnode->cb_lock); @@ -207,12 +297,17 @@ static void afs_apply_status(struct afs_operation *op, if (vp->update_ctime) inode_set_ctime_to_ts(inode, op->ctime); - if (vnode->status.data_version != status->data_version) + if (vnode->status.data_version != status->data_version) { + trace_afs_set_dv(vnode, status->data_version); data_changed = true; + } vnode->status = *status; if (vp->dv_before + vp->dv_delta != status->data_version) { + trace_afs_dv_mismatch(vnode, vp->dv_before, vp->dv_delta, + status->data_version); + if (vnode->cb_ro_snapshot == atomic_read(&vnode->volume->cb_ro_snapshot) && atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE) pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n", @@ -223,12 +318,10 @@ static void afs_apply_status(struct afs_operation *op, op->debug_id); vnode->invalid_before = status->data_version; - if (vnode->status.type == AFS_FTYPE_DIR) { - if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) - afs_stat_v(vnode, n_inval); - } else { + if (vnode->status.type == AFS_FTYPE_DIR) + afs_invalidate_dir(vnode, afs_dir_invalid_dv_mismatch); + else set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); - } change_size = true; data_changed = true; unexpected_jump = true; @@ -258,6 +351,8 @@ static void afs_apply_status(struct afs_operation *op, inode_set_ctime_to_ts(inode, t); inode_set_atime_to_ts(inode, t); } + if (op->ops == &afs_fetch_data_operation) + op->fetch.subreq->rreq->i_size = status->size; } } @@ -273,7 +368,7 @@ static void afs_apply_callback(struct afs_operation *op, if (!afs_cb_is_broken(vp->cb_break_before, vnode)) { if (op->volume->type == AFSVL_RWVOL) vnode->cb_server = op->server; - atomic64_set(&vnode->cb_expires_at, cb->expires_at); + afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb); } } @@ -435,7 +530,9 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) } __packed key; struct afs_vnode_cache_aux aux; - if (vnode->status.type != AFS_FTYPE_FILE) { + if (vnode->status.type != AFS_FTYPE_FILE && + vnode->status.type != AFS_FTYPE_DIR && + vnode->status.type != AFS_FTYPE_SYMLINK) { vnode->netfs.cache = NULL; return; } @@ -637,6 +734,7 @@ int afs_drop_inode(struct inode *inode) void afs_evict_inode(struct inode *inode) { struct afs_vnode_cache_aux aux; + struct afs_super_info *sbi = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); _enter("{%llx:%llu.%d}", @@ -648,8 +746,22 @@ void afs_evict_inode(struct inode *inode) ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); + if ((S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) && + (inode->i_state & I_DIRTY) && + !sbi->dyn_root) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .for_sync = true, + .range_end = LLONG_MAX, + }; + + afs_single_writepages(inode->i_mapping, &wbc); + } + 
netfs_wait_for_outstanding_io(inode); truncate_inode_pages_final(&inode->i_data); + netfs_free_folioq_buffer(vnode->directory); afs_set_cache_aux(vnode, &aux); netfs_clear_inode_writeback(inode, &aux); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index c9d620175e80..90f407774a9a 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -163,6 +163,7 @@ struct afs_call { spinlock_t state_lock; int error; /* error code */ u32 abort_code; /* Remote abort ID or 0 */ + unsigned long long remaining; /* How much is left to receive */ unsigned int max_lifespan; /* Maximum lifespan in secs to set if not 0 */ unsigned request_size; /* size of request data */ unsigned reply_max; /* maximum size of reply */ @@ -201,11 +202,17 @@ struct afs_call_type { /* clean up a call */ void (*destructor)(struct afs_call *call); + /* Async receive processing function */ + void (*async_rx)(struct work_struct *work); + /* Work function */ void (*work)(struct work_struct *work); /* Call done function (gets called immediately on success or failure) */ void (*done)(struct afs_call *call); + + /* Handle a call being immediately cancelled. */ + void (*immediate_cancel)(struct afs_call *call); }; /* @@ -233,28 +240,6 @@ static inline struct key *afs_file_key(struct file *file) } /* - * Record of an outstanding read operation on a vnode. - */ -struct afs_read { - loff_t pos; /* Where to start reading */ - loff_t len; /* How much we're asking for */ - loff_t actual_len; /* How much we're actually getting */ - loff_t file_size; /* File size returned by server */ - struct key *key; /* The key to use to reissue the read */ - struct afs_vnode *vnode; /* The file being read into. */ - struct netfs_io_subrequest *subreq; /* Fscache helper read request this belongs to */ - afs_dataversion_t data_version; /* Version number returned by server */ - refcount_t usage; - unsigned int call_debug_id; - unsigned int nr_pages; - int error; - void (*done)(struct afs_read *); - void (*cleanup)(struct afs_read *); - struct iov_iter *iter; /* Iterator representing the buffer */ - struct iov_iter def_iter; /* Default iterator */ -}; - -/* * AFS superblock private data * - there's one superblock per volume */ @@ -702,13 +687,14 @@ struct afs_vnode { struct afs_file_status status; /* AFS status info for this file */ afs_dataversion_t invalid_before; /* Child dentries are invalid before this */ struct afs_permits __rcu *permit_cache; /* cache of permits so far obtained */ - struct mutex io_lock; /* Lock for serialising I/O on this mutex */ + struct list_head io_lock_waiters; /* Threads waiting for the I/O lock */ struct rw_semaphore validate_lock; /* lock for validating this vnode */ struct rw_semaphore rmdir_lock; /* Lock for rmdir vs sillyrename */ struct key *silly_key; /* Silly rename key */ spinlock_t wb_lock; /* lock for wb_keys */ spinlock_t lock; /* waitqueue/flags lock */ unsigned long flags; +#define AFS_VNODE_IO_LOCK 0 /* Set if the I/O serialisation lock is held */ #define AFS_VNODE_UNSET 1 /* set if vnode attributes not yet set */ #define AFS_VNODE_DIR_VALID 2 /* Set if dir contents are valid */ #define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */ @@ -719,7 +705,9 @@ struct afs_vnode { #define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */ #define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */ #define AFS_VNODE_MODIFYING 10 /* Set if we're performing a modification op */ +#define AFS_VNODE_DIR_READ 11 /* Set if we've read a dir's contents */ + struct 
folio_queue *directory; /* Directory contents */ struct list_head wb_keys; /* List of keys available for writeback */ struct list_head pending_locks; /* locks waiting to be granted */ struct list_head granted_locks; /* locks granted on this file */ @@ -728,6 +716,7 @@ struct afs_vnode { ktime_t locked_at; /* Time at which lock obtained */ enum afs_lock_state lock_state : 8; afs_lock_type_t lock_type : 8; + unsigned int directory_size; /* Amount of space in ->directory */ /* outstanding callback notification on this file */ struct work_struct cb_work; /* Work for mmap'd files */ @@ -907,7 +896,7 @@ struct afs_operation { bool new_negative; } rename; struct { - struct afs_read *req; + struct netfs_io_subrequest *subreq; } fetch; struct { afs_lock_type_t type; @@ -959,6 +948,7 @@ struct afs_operation { #define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */ #define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */ #define AFS_OPERATION_DIR_CONFLICT 0x1000 /* Set if we detected a 3rd-party dir change */ +#define AFS_OPERATION_ASYNC 0x2000 /* Set if should run asynchronously */ }; /* @@ -983,6 +973,21 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl i_size_read(&vnode->netfs.inode), flags); } +/* + * Directory iteration management. + */ +struct afs_dir_iter { + struct afs_vnode *dvnode; + union afs_xdr_dir_block *block; + struct folio_queue *fq; + unsigned int fpos; + int fq_slot; + unsigned int loop_check; + u8 nr_slots; + u8 bucket; + unsigned int prev_entry; +}; + #include <trace/events/afs.h> /*****************************************************************************/ @@ -1064,8 +1069,13 @@ extern const struct inode_operations afs_dir_inode_operations; extern const struct address_space_operations afs_dir_aops; extern const struct dentry_operations afs_fs_dentry_operations; +ssize_t afs_read_single(struct afs_vnode *dvnode, struct file *file); +ssize_t afs_read_dir(struct afs_vnode *dvnode, struct file *file) + __acquires(&dvnode->validate_lock); extern void afs_d_release(struct dentry *); extern void afs_check_for_remote_deletion(struct afs_operation *); +int afs_single_writepages(struct address_space *mapping, + struct writeback_control *wbc); /* * dir_edit.c @@ -1075,6 +1085,18 @@ extern void afs_edit_dir_add(struct afs_vnode *, struct qstr *, struct afs_fid * extern void afs_edit_dir_remove(struct afs_vnode *, struct qstr *, enum afs_edit_dir_reason); void afs_edit_dir_update_dotdot(struct afs_vnode *vnode, struct afs_vnode *new_dvnode, enum afs_edit_dir_reason why); +void afs_mkdir_init_dir(struct afs_vnode *dvnode, struct afs_vnode *parent_vnode); + +/* + * dir_search.c + */ +unsigned int afs_dir_hash_name(const struct qstr *name); +bool afs_dir_init_iter(struct afs_dir_iter *iter, const struct qstr *name); +union afs_xdr_dir_block *afs_dir_find_block(struct afs_dir_iter *iter, size_t block); +int afs_dir_search_bucket(struct afs_dir_iter *iter, const struct qstr *name, + struct afs_fid *_fid); +int afs_dir_search(struct afs_vnode *dvnode, struct qstr *name, + struct afs_fid *_fid, afs_dataversion_t *_dir_version); /* * dir_silly.c @@ -1099,24 +1121,17 @@ extern void afs_dynroot_depopulate(struct super_block *); * file.c */ extern const struct address_space_operations afs_file_aops; -extern const struct address_space_operations afs_symlink_aops; extern const struct inode_operations afs_file_inode_operations; extern const struct file_operations afs_file_operations; +extern const struct 
afs_operation_ops afs_fetch_data_operation; extern const struct netfs_request_ops afs_req_ops; extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *); extern void afs_put_wb_key(struct afs_wb_key *); extern int afs_open(struct inode *, struct file *); extern int afs_release(struct inode *, struct file *); -extern int afs_fetch_data(struct afs_vnode *, struct afs_read *); -extern struct afs_read *afs_alloc_read(gfp_t); -extern void afs_put_read(struct afs_read *); - -static inline struct afs_read *afs_get_read(struct afs_read *req) -{ - refcount_inc(&req->usage); - return req; -} +void afs_fetch_data_async_rx(struct work_struct *work); +void afs_fetch_data_immediate_cancel(struct afs_call *call); /* * flock.c @@ -1168,6 +1183,7 @@ extern void afs_fs_store_acl(struct afs_operation *); extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *); extern int afs_put_operation(struct afs_operation *); extern bool afs_begin_vnode_operation(struct afs_operation *); +extern void afs_end_vnode_operation(struct afs_operation *op); extern void afs_wait_for_operation(struct afs_operation *); extern int afs_do_sync_operation(struct afs_operation *); @@ -1205,6 +1221,10 @@ extern void afs_fs_probe_cleanup(struct afs_net *); */ extern const struct afs_operation_ops afs_fetch_status_operation; +void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op); +const char *afs_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback); +int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen); extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *); extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *); extern int afs_ilookup5_test_by_fid(struct inode *, void *); @@ -1336,6 +1356,7 @@ extern void afs_charge_preallocation(struct work_struct *); extern void afs_put_call(struct afs_call *); void afs_deferred_put_call(struct afs_call *call); void afs_make_call(struct afs_call *call, gfp_t gfp); +void afs_deliver_to_call(struct afs_call *call); void afs_wait_for_call_to_complete(struct afs_call *call); extern struct afs_call *afs_alloc_flat_call(struct afs_net *, const struct afs_call_type *, @@ -1346,6 +1367,28 @@ extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); extern int afs_extract_data(struct afs_call *, bool); extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause); +static inline struct afs_call *afs_get_call(struct afs_call *call, + enum afs_call_trace why) +{ + int r; + + __refcount_inc(&call->ref, &r); + + trace_afs_call(call->debug_id, why, r + 1, + atomic_read(&call->net->nr_outstanding_calls), + __builtin_return_address(0)); + return call; +} + +static inline void afs_see_call(struct afs_call *call, enum afs_call_trace why) +{ + int r = refcount_read(&call->ref); + + trace_afs_call(call->debug_id, why, r, + atomic_read(&call->net->nr_outstanding_calls), + __builtin_return_address(0)); +} + static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call, gfp_t gfp) { @@ -1712,6 +1755,38 @@ static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where) return -EIO; } +/* + * Set the callback promise on a vnode. 
+ */ +static inline void afs_set_cb_promise(struct afs_vnode *vnode, time64_t expires_at, + enum afs_cb_promise_trace trace) +{ + atomic64_set(&vnode->cb_expires_at, expires_at); + trace_afs_cb_promise(vnode, trace); +} + +/* + * Clear the callback promise on a vnode, returning true if it was promised. + */ +static inline bool afs_clear_cb_promise(struct afs_vnode *vnode, + enum afs_cb_promise_trace trace) +{ + trace_afs_cb_promise(vnode, trace); + return atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE; +} + +/* + * Mark a directory as being invalid. + */ +static inline void afs_invalidate_dir(struct afs_vnode *dvnode, + enum afs_dir_invalid_trace trace) +{ + if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { + trace_afs_dir_invalid(dvnode, trace); + afs_stat_v(dvnode, n_inval); + } +} + /*****************************************************************************/ /* * debug tracing diff --git a/fs/afs/main.c b/fs/afs/main.c index a14f6013e316..1ae0067f772d 100644 --- a/fs/afs/main.c +++ b/fs/afs/main.c @@ -177,7 +177,7 @@ static int __init afs_init(void) afs_wq = alloc_workqueue("afs", 0, 0); if (!afs_wq) goto error_afs_wq; - afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM, 0); + afs_async_calls = alloc_workqueue("kafsd", WQ_MEM_RECLAIM | WQ_UNBOUND, 0); if (!afs_async_calls) goto error_async; afs_lock_manager = alloc_workqueue("kafs_lockd", WQ_MEM_RECLAIM, 0); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index 297487ee8323..507c25a5b2cb 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -30,7 +30,7 @@ const struct file_operations afs_mntpt_file_operations = { const struct inode_operations afs_mntpt_inode_operations = { .lookup = afs_mntpt_lookup, - .readlink = page_readlink, + .readlink = afs_readlink, .getattr = afs_getattr, }; @@ -118,9 +118,9 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) ctx->volnamesz = sizeof(afs_root_volume) - 1; } else { /* read the contents of the AFS special symlink */ - struct page *page; + DEFINE_DELAYED_CALL(cleanup); + const char *content; loff_t size = i_size_read(d_inode(mntpt)); - char *buf; if (src_as->cell) ctx->cell = afs_use_cell(src_as->cell, afs_cell_trace_use_mntpt); @@ -128,16 +128,16 @@ static int afs_mntpt_set_params(struct fs_context *fc, struct dentry *mntpt) if (size < 2 || size > PAGE_SIZE - 1) return -EINVAL; - page = read_mapping_page(d_inode(mntpt)->i_mapping, 0, NULL); - if (IS_ERR(page)) - return PTR_ERR(page); + content = afs_get_link(mntpt, d_inode(mntpt), &cleanup); + if (IS_ERR(content)) { + do_delayed_call(&cleanup); + return PTR_ERR(content); + } - buf = kmap(page); ret = -EINVAL; - if (buf[size - 1] == '.') - ret = vfs_parse_fs_string(fc, "source", buf, size - 1); - kunmap(page); - put_page(page); + if (content[size - 1] == '.') + ret = vfs_parse_fs_string(fc, "source", content, size - 1); + do_delayed_call(&cleanup); if (ret < 0) return ret; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 15eab053af6d..e7614f4f30c2 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -240,7 +240,13 @@ static int afs_proc_rootcell_write(struct file *file, char *buf, size_t size) /* determine command to perform */ _debug("rootcell=%s", buf); - ret = afs_cell_init(net, buf); + ret = -EEXIST; + inode_lock(file_inode(file)); + if (!net->ws_cell) + ret = afs_cell_init(net, buf); + else + printk("busy\n"); + inode_unlock(file_inode(file)); out: _leave(" = %d", ret); diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index d612983d6f38..a1c24f589d9e 100644 --- 
a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -99,7 +99,7 @@ static bool afs_start_fs_iteration(struct afs_operation *op, write_seqlock(&vnode->cb_lock); ASSERTCMP(cb_server, ==, vnode->cb_server); vnode->cb_server = NULL; - if (atomic64_xchg(&vnode->cb_expires_at, AFS_NO_CB_PROMISE) != AFS_NO_CB_PROMISE) + if (afs_clear_cb_promise(vnode, afs_cb_promise_clear_rotate_server)) vnode->cb_break++; write_sequnlock(&vnode->cb_lock); } @@ -583,7 +583,7 @@ selected_server: if (vnode->cb_server != server) { vnode->cb_server = server; vnode->cb_v_check = atomic_read(&vnode->volume->cb_v_break); - atomic64_set(&vnode->cb_expires_at, AFS_NO_CB_PROMISE); + afs_clear_cb_promise(vnode, afs_cb_promise_clear_server_change); } retry_server: diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 9f2a3bb56ec6..886416ea1d96 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -149,7 +149,8 @@ static struct afs_call *afs_alloc_call(struct afs_net *net, call->net = net; call->debug_id = atomic_inc_return(&rxrpc_debug_id); refcount_set(&call->ref, 1); - INIT_WORK(&call->async_work, afs_process_async_call); + INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call); + INIT_WORK(&call->work, call->type->work); INIT_WORK(&call->free_work, afs_deferred_free_worker); init_waitqueue_head(&call->waitq); spin_lock_init(&call->state_lock); @@ -235,27 +236,12 @@ void afs_deferred_put_call(struct afs_call *call) schedule_work(&call->free_work); } -static struct afs_call *afs_get_call(struct afs_call *call, - enum afs_call_trace why) -{ - int r; - - __refcount_inc(&call->ref, &r); - - trace_afs_call(call->debug_id, why, r + 1, - atomic_read(&call->net->nr_outstanding_calls), - __builtin_return_address(0)); - return call; -} - /* * Queue the call for actual work. */ static void afs_queue_call_work(struct afs_call *call) { if (call->type->work) { - INIT_WORK(&call->work, call->type->work); - afs_get_call(call, afs_call_trace_work); if (!queue_work(afs_wq, &call->work)) afs_put_call(call); @@ -430,11 +416,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp) return; error_do_abort: - if (ret != -ECONNABORTED) { + if (ret != -ECONNABORTED) rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, afs_abort_send_data_error); - } else { + if (call->async) { + afs_see_call(call, afs_call_trace_async_abort); + return; + } + + if (ret == -ECONNABORTED) { len = 0; iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0); rxrpc_kernel_recv_data(call->net->socket, rxcall, @@ -445,8 +436,10 @@ error_do_abort: call->error = ret; trace_afs_call_done(call); error_kill_call: - if (call->type->done) - call->type->done(call); + if (call->async) + afs_see_call(call, afs_call_trace_async_kill); + if (call->type->immediate_cancel) + call->type->immediate_cancel(call); /* We need to dispose of the extra ref we grabbed for an async call. 
* The call, however, might be queued on afs_async_calls and we need to @@ -501,7 +494,7 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort) /* * deliver messages to a call */ -static void afs_deliver_to_call(struct afs_call *call) +void afs_deliver_to_call(struct afs_call *call) { enum afs_call_state state; size_t len; @@ -602,7 +595,6 @@ local_abort: abort_code = 0; call_complete: afs_set_call_complete(call, ret, remote_abort); - state = AFS_CALL_COMPLETE; goto done; } @@ -803,6 +795,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call) return -ENOTSUPP; trace_afs_cb_call(call); + call->work.func = call->type->work; /* pass responsibility for the remainer of this message off to the * cache manager op */ diff --git a/fs/afs/super.c b/fs/afs/super.c index f3ba1c3e72f5..a9bee610674e 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -663,7 +663,7 @@ static void afs_i_init_once(void *_vnode) memset(vnode, 0, sizeof(*vnode)); inode_init_once(&vnode->netfs.inode); - mutex_init(&vnode->io_lock); + INIT_LIST_HEAD(&vnode->io_lock_waiters); init_rwsem(&vnode->validate_lock); spin_lock_init(&vnode->wb_lock); spin_lock_init(&vnode->lock); @@ -696,6 +696,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb) vnode->volume = NULL; vnode->lock_key = NULL; vnode->permit_cache = NULL; + vnode->directory = NULL; + vnode->directory_size = 0; vnode->flags = 1 << AFS_VNODE_UNSET; vnode->lock_state = AFS_VNODE_LOCK_NONE; diff --git a/fs/afs/validation.c b/fs/afs/validation.c index bef8af12ebe2..0ba8336c9025 100644 --- a/fs/afs/validation.c +++ b/fs/afs/validation.c @@ -120,22 +120,31 @@ bool afs_check_validity(const struct afs_vnode *vnode) { const struct afs_volume *volume = vnode->volume; + enum afs_vnode_invalid_trace trace = afs_vnode_valid_trace; + time64_t cb_expires_at = atomic64_read(&vnode->cb_expires_at); time64_t deadline = ktime_get_real_seconds() + 10; if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) return true; - if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) || - atomic64_read(&vnode->cb_expires_at) <= deadline || - volume->cb_expires_at <= deadline || - vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot) || - vnode->cb_scrub != atomic_read(&volume->cb_scrub) || - test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { - _debug("inval"); - return false; - } - - return true; + if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break)) + trace = afs_vnode_invalid_trace_cb_v_break; + else if (cb_expires_at == AFS_NO_CB_PROMISE) + trace = afs_vnode_invalid_trace_no_cb_promise; + else if (cb_expires_at <= deadline) + trace = afs_vnode_invalid_trace_expired; + else if (volume->cb_expires_at <= deadline) + trace = afs_vnode_invalid_trace_vol_expired; + else if (vnode->cb_ro_snapshot != atomic_read(&volume->cb_ro_snapshot)) + trace = afs_vnode_invalid_trace_cb_ro_snapshot; + else if (vnode->cb_scrub != atomic_read(&volume->cb_scrub)) + trace = afs_vnode_invalid_trace_cb_scrub; + else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) + trace = afs_vnode_invalid_trace_zap_data; + else + return true; + trace_afs_vnode_invalid(vnode, trace); + return false; } /* diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 55dd0fc5aad7..3a23c0b08eb6 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -370,6 +370,7 @@ static const struct afs_call_type afs_RXVLGetCapabilities = { .name = "VL.GetCapabilities", .op = afs_VL_GetCapabilities, .deliver = afs_deliver_vl_get_capabilities, + .immediate_cancel = afs_vlserver_probe_result, 
.done = afs_vlserver_probe_result, .destructor = afs_destroy_vl_get_capabilities, }; diff --git a/fs/afs/write.c b/fs/afs/write.c index ccb6aa8027c5..18b0a9f1615e 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -182,8 +182,8 @@ void afs_issue_write(struct netfs_io_subrequest *subreq) */ void afs_begin_writeback(struct netfs_io_request *wreq) { - afs_get_writeback_key(wreq); - wreq->io_streams[0].avail = true; + if (S_ISREG(wreq->inode->i_mode)) + afs_get_writeback_key(wreq); } /* @@ -196,6 +196,18 @@ void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *st list_first_entry(&stream->subrequests, struct netfs_io_subrequest, rreq_link); + switch (wreq->origin) { + case NETFS_READAHEAD: + case NETFS_READPAGE: + case NETFS_READ_GAPS: + case NETFS_READ_SINGLE: + case NETFS_READ_FOR_WRITE: + case NETFS_DIO_READ: + return; + default: + break; + } + switch (subreq->error) { case -EACCES: case -EPERM: diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h index 8ca868164507..cc5f143d21a3 100644 --- a/fs/afs/xdr_fs.h +++ b/fs/afs/xdr_fs.h @@ -88,7 +88,7 @@ union afs_xdr_dir_block { struct { struct afs_xdr_dir_hdr hdr; - u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS]; + u8 alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR]; __be16 hashtable[AFS_DIR_HASHTBL_SIZE]; } meta; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 024227aba4cd..257af259c04a 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -352,19 +352,19 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call) static int yfs_deliver_fs_fetch_data64(struct afs_call *call) { struct afs_operation *op = call->op; + struct netfs_io_subrequest *subreq = op->fetch.subreq; struct afs_vnode_param *vp = &op->file[0]; - struct afs_read *req = op->fetch.req; const __be32 *bp; size_t count_before; int ret; _enter("{%u,%zu, %zu/%llu}", call->unmarshall, call->iov_len, iov_iter_count(call->iter), - req->actual_len); + call->remaining); switch (call->unmarshall) { case 0: - req->actual_len = 0; + call->remaining = 0; afs_extract_to_tmp64(call); call->unmarshall++; fallthrough; @@ -379,42 +379,39 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) if (ret < 0) return ret; - req->actual_len = be64_to_cpu(call->tmp64); - _debug("DATA length: %llu", req->actual_len); + call->remaining = be64_to_cpu(call->tmp64); + _debug("DATA length: %llu", call->remaining); - if (req->actual_len == 0) + if (call->remaining == 0) goto no_more_data; - call->iter = req->iter; - call->iov_len = min(req->actual_len, req->len); + call->iter = &subreq->io_iter; + call->iov_len = min(call->remaining, subreq->len - subreq->transferred); call->unmarshall++; fallthrough; /* extract the returned data */ case 2: count_before = call->iov_len; - _debug("extract data %zu/%llu", count_before, req->actual_len); + _debug("extract data %zu/%llu", count_before, call->remaining); ret = afs_extract_data(call, true); - if (req->subreq) { - req->subreq->transferred += count_before - call->iov_len; - netfs_read_subreq_progress(req->subreq, false); - } + subreq->transferred += count_before - call->iov_len; if (ret < 0) return ret; call->iter = &call->def_iter; - if (req->actual_len <= req->len) + if (call->remaining) goto no_more_data; /* Discard any excess data the server gave us */ - afs_extract_discard(call, req->actual_len - req->len); + afs_extract_discard(call, call->remaining); call->unmarshall = 3; fallthrough; case 3: _debug("extract discard %zu/%llu", - iov_iter_count(call->iter), req->actual_len - req->len); + iov_iter_count(call->iter), call->remaining); ret 
= afs_extract_data(call, true); if (ret < 0) @@ -439,8 +436,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) xdr_decode_YFSCallBack(&bp, call, &vp->scb); xdr_decode_YFSVolSync(&bp, &op->volsync); - req->data_version = vp->scb.status.data_version; - req->file_size = vp->scb.status.size; + if (subreq->start + subreq->transferred >= vp->scb.status.size) + __set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags); call->unmarshall++; fallthrough; @@ -459,7 +456,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) static const struct afs_call_type yfs_RXYFSFetchData64 = { .name = "YFS.FetchData64", .op = yfs_FS_FetchData64, + .async_rx = afs_fetch_data_async_rx, .deliver = yfs_deliver_fs_fetch_data64, + .immediate_cancel = afs_fetch_data_immediate_cancel, .destructor = afs_flat_call_destructor, }; @@ -468,14 +467,15 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = { */ void yfs_fs_fetch_data(struct afs_operation *op) { + struct netfs_io_subrequest *subreq = op->fetch.subreq; struct afs_vnode_param *vp = &op->file[0]; - struct afs_read *req = op->fetch.req; struct afs_call *call; __be32 *bp; - _enter(",%x,{%llx:%llu},%llx,%llx", + _enter(",%x,{%llx:%llu},%llx,%zx", key_serial(op->key), vp->fid.vid, vp->fid.vnode, - req->pos, req->len); + subreq->start + subreq->transferred, + subreq->len - subreq->transferred); call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64, sizeof(__be32) * 2 + @@ -487,15 +487,16 @@ void yfs_fs_fetch_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); - req->call_debug_id = call->debug_id; + if (op->flags & AFS_OPERATION_ASYNC) + call->async = true; /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHDATA64); bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); - bp = xdr_encode_u64(bp, req->pos); - bp = xdr_encode_u64(bp, req->len); + bp = xdr_encode_u64(bp, subreq->start + subreq->transferred); + bp = xdr_encode_u64(bp, subreq->len - subreq->transferred); yfs_check_req(call, bp); call->fid = vp->fid; @@ -666,8 +667,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) static void yfs_done_fs_remove_file2(struct afs_call *call) { if (call->error == -ECONNABORTED && - call->abort_code == RX_INVALID_OPERATION) { - set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags); + (call->abort_code == RX_INVALID_OPERATION || + call->abort_code == RXGEN_OPCODE)) { + set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags); call->op->flags |= AFS_OPERATION_DOWNGRADE; } } @@ -224,7 +224,7 @@ static unsigned long aio_nr; /* current system wide number of aio requests */ static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ /*----end sysctl variables---*/ #ifdef CONFIG_SYSCTL -static struct ctl_table aio_sysctls[] = { +static const struct ctl_table aio_sysctls[] = { { .procname = "aio-nr", .data = &aio_nr, diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 42bd1cb7c9cd..583ac81669c2 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -60,14 +60,14 @@ static struct inode *anon_inode_make_secure_inode( const struct inode *context_inode) { struct inode *inode; - const struct qstr qname = QSTR_INIT(name, strlen(name)); int error; inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; - error = security_inode_init_security_anon(inode, &qname, context_inode); + error = security_inode_init_security_anon(inode, &QSTR(name), + context_inode); if (error) { iput(inode); return 
ERR_PTR(error); diff --git a/fs/backing-file.c b/fs/backing-file.c index cbdad8b68474..763fbe9b72b2 100644 --- a/fs/backing-file.c +++ b/fs/backing-file.c @@ -176,7 +176,7 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, !(file->f_mode & FMODE_CAN_ODIRECT)) return -EINVAL; - old_cred = override_creds_light(ctx->cred); + old_cred = override_creds(ctx->cred); if (is_sync_kiocb(iocb)) { rwf_t rwf = iocb_to_rw_flags(flags); @@ -197,7 +197,7 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, backing_aio_cleanup(aio, ret); } out: - revert_creds_light(old_cred); + revert_creds(old_cred); if (ctx->accessed) ctx->accessed(iocb->ki_filp); @@ -233,7 +233,7 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, */ flags &= ~IOCB_DIO_CALLER_COMP; - old_cred = override_creds_light(ctx->cred); + old_cred = override_creds(ctx->cred); if (is_sync_kiocb(iocb)) { rwf_t rwf = iocb_to_rw_flags(flags); @@ -264,7 +264,7 @@ ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, backing_aio_cleanup(aio, ret); } out: - revert_creds_light(old_cred); + revert_creds(old_cred); return ret; } @@ -281,9 +281,9 @@ ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb, if (WARN_ON_ONCE(!(in->f_mode & FMODE_BACKING))) return -EIO; - old_cred = override_creds_light(ctx->cred); + old_cred = override_creds(ctx->cred); ret = vfs_splice_read(in, &iocb->ki_pos, pipe, len, flags); - revert_creds_light(old_cred); + revert_creds(old_cred); if (ctx->accessed) ctx->accessed(iocb->ki_filp); @@ -310,11 +310,11 @@ ssize_t backing_file_splice_write(struct pipe_inode_info *pipe, if (ret) return ret; - old_cred = override_creds_light(ctx->cred); + old_cred = override_creds(ctx->cred); file_start_write(out); ret = out->f_op->splice_write(pipe, out, &iocb->ki_pos, len, flags); file_end_write(out); - revert_creds_light(old_cred); + revert_creds(old_cred); if (ctx->end_write) ctx->end_write(iocb, ret); @@ -338,9 +338,9 @@ int backing_file_mmap(struct file *file, struct vm_area_struct *vma, vma_set_file(vma, file); - old_cred = override_creds_light(ctx->cred); + old_cred = override_creds(ctx->cred); ret = call_mmap(vma->vm_file, vma); - revert_creds_light(old_cred); + revert_creds(old_cred); if (ctx->accessed) ctx->accessed(user_file); diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig index ab6c95b895b3..fc7efd0a7525 100644 --- a/fs/bcachefs/Kconfig +++ b/fs/bcachefs/Kconfig @@ -15,6 +15,7 @@ config BCACHEFS_FS select ZLIB_INFLATE select ZSTD_COMPRESS select ZSTD_DECOMPRESS + select CRYPTO select CRYPTO_SHA256 select CRYPTO_CHACHA20 select CRYPTO_POLY1305 @@ -60,6 +61,13 @@ config BCACHEFS_DEBUG The resulting code will be significantly slower than normal; you probably shouldn't select this option unless you're a developer. +config BCACHEFS_INJECT_TRANSACTION_RESTARTS + bool "Randomly inject transaction restarts" + depends on BCACHEFS_DEBUG + help + Randomly inject transaction restarts in a few core paths - may have a + significant performance penalty + config BCACHEFS_TESTS bool "bcachefs unit and performance tests" depends on BCACHEFS_FS @@ -90,7 +98,7 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN config BCACHEFS_PATH_TRACEPOINTS bool "Extra btree_path tracepoints" - depends on BCACHEFS_FS + depends on BCACHEFS_FS && TRACING help Enable extra tracepoints for debugging btree_path operations; we don't normally want these enabled because they happen at very high rates. 
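The fs/backing-file.c hunks above consistently swap the override_creds_light()/revert_creds_light() pair for plain override_creds()/revert_creds() around every delegated read, write, splice and mmap call. The sketch below illustrates that bracketing pattern on its own, assuming a context object that carries the credentials captured when the backing file was opened; it is not the kernel's implementation, and backing_file_example_ctx / backing_file_example_read() are made-up names standing in for the real backing_file_ctx helpers.

#include <linux/cred.h>
#include <linux/fs.h>

/*
 * Illustrative-only helper: perform I/O on a backing file with the
 * credentials captured when that file was opened, then restore the
 * caller's credentials.  Mirrors the override_creds()/revert_creds()
 * bracketing used by the hunks above.
 */
struct backing_file_example_ctx {
	const struct cred *cred;		/* creds saved at open time */
};

static ssize_t backing_file_example_read(struct file *backing_file,
					 char __user *buf, size_t len,
					 loff_t *pos,
					 struct backing_file_example_ctx *ctx)
{
	const struct cred *old_cred;
	ssize_t ret;

	old_cred = override_creds(ctx->cred);	/* act as the opener */
	ret = vfs_read(backing_file, buf, len, pos);
	revert_creds(old_cred);			/* always restore the caller */

	return ret;
}

The only change the series makes to these call sites is the spelling of the helpers; the save-creds / do-I/O / restore-creds bracketing itself is unchanged.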
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile index 56d20e219f59..d2689388d5e8 100644 --- a/fs/bcachefs/Makefile +++ b/fs/bcachefs/Makefile @@ -82,6 +82,7 @@ bcachefs-y := \ siphash.o \ six.o \ snapshot.o \ + str_hash.o \ subvolume.o \ super.o \ super-io.o \ diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c index 87f1be9d4db4..99487727ae64 100644 --- a/fs/bcachefs/acl.c +++ b/fs/bcachefs/acl.c @@ -184,11 +184,6 @@ invalid: return ERR_PTR(-EINVAL); } -#define acl_for_each_entry(acl, acl_e) \ - for (acl_e = acl->a_entries; \ - acl_e < acl->a_entries + acl->a_count; \ - acl_e++) - /* * Convert from in-memory to filesystem representation. */ @@ -199,11 +194,11 @@ bch2_acl_to_xattr(struct btree_trans *trans, { struct bkey_i_xattr *xattr; bch_acl_header *acl_header; - const struct posix_acl_entry *acl_e; + const struct posix_acl_entry *acl_e, *pe; void *outptr; unsigned nr_short = 0, nr_long = 0, acl_len, u64s; - acl_for_each_entry(acl, acl_e) { + FOREACH_ACL_ENTRY(acl_e, acl, pe) { switch (acl_e->e_tag) { case ACL_USER: case ACL_GROUP: @@ -241,7 +236,7 @@ bch2_acl_to_xattr(struct btree_trans *trans, outptr = (void *) acl_header + sizeof(*acl_header); - acl_for_each_entry(acl, acl_e) { + FOREACH_ACL_ENTRY(acl_e, acl, pe) { bch_acl_entry *entry = outptr; entry->e_tag = cpu_to_le16(acl_e->e_tag); diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c index c84a91572a1d..3ea809990ef1 100644 --- a/fs/bcachefs/alloc_background.c +++ b/fs/bcachefs/alloc_background.c @@ -198,7 +198,7 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a) } int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); int ret = 0; @@ -213,7 +213,7 @@ fsck_err: } int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_alloc_unpacked u; int ret = 0; @@ -226,7 +226,7 @@ fsck_err: } int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_alloc_unpacked u; int ret = 0; @@ -239,7 +239,7 @@ fsck_err: } int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bch_alloc_v4 a; int ret = 0; @@ -322,9 +322,9 @@ fsck_err: void bch2_alloc_v4_swab(struct bkey_s k) { struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v; - struct bch_backpointer *bp, *bps; - a->journal_seq = swab64(a->journal_seq); + a->journal_seq_nonempty = swab64(a->journal_seq_nonempty); + a->journal_seq_empty = swab64(a->journal_seq_empty); a->flags = swab32(a->flags); a->dirty_sectors = swab32(a->dirty_sectors); a->cached_sectors = swab32(a->cached_sectors); @@ -333,13 +333,6 @@ void bch2_alloc_v4_swab(struct bkey_s k) a->stripe = swab32(a->stripe); a->nr_external_backpointers = swab32(a->nr_external_backpointers); a->stripe_sectors = swab32(a->stripe_sectors); - - bps = alloc_v4_backpointers(a); - for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) { - bp->bucket_offset = swab40(bp->bucket_offset); - bp->bucket_len = swab32(bp->bucket_len); - bch2_bpos_swab(&bp->pos); - } } void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) @@ -354,16 +347,17 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, 
a->oldest_gen); bch2_prt_data_type(out, a->data_type); prt_newline(out); - prt_printf(out, "journal_seq %llu\n", a->journal_seq); - prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a)); - prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a)); - prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors); - prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors); - prt_printf(out, "cached_sectors %u\n", a->cached_sectors); - prt_printf(out, "stripe %u\n", a->stripe); - prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy); - prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]); - prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]); + prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty); + prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty); + prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a)); + prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a)); + prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors); + prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors); + prt_printf(out, "cached_sectors %u\n", a->cached_sectors); + prt_printf(out, "stripe %u\n", a->stripe); + prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy); + prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]); + prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]); if (ca) prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca)); @@ -392,7 +386,7 @@ void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out) struct bkey_alloc_unpacked u = bch2_alloc_unpack(k); *out = (struct bch_alloc_v4) { - .journal_seq = u.journal_seq, + .journal_seq_nonempty = u.journal_seq, .flags = u.need_discard, .gen = u.gen, .oldest_gen = u.oldest_gen, @@ -517,7 +511,7 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset) } int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -664,74 +658,80 @@ int bch2_alloc_read(struct bch_fs *c) /* Free space/discard btree: */ +static int __need_discard_or_freespace_err(struct btree_trans *trans, + struct bkey_s_c alloc_k, + bool set, bool discard, bool repair) +{ + struct bch_fs *c = trans->c; + enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0); + enum bch_sb_error_id err_id = discard + ? BCH_FSCK_ERR_need_discard_key_wrong + : BCH_FSCK_ERR_freespace_key_wrong; + enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace; + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, alloc_k); + + int ret = __bch2_fsck_err(NULL, trans, flags, err_id, + "bucket incorrectly %sset in %s btree\n" + " %s", + set ? "" : "un", + bch2_btree_id_str(btree), + buf.buf); + if (ret == -BCH_ERR_fsck_ignore || + ret == -BCH_ERR_fsck_errors_not_fixed) + ret = 0; + + printbuf_exit(&buf); + return ret; +} + +#define need_discard_or_freespace_err(...) \ + fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__)) + +#define need_discard_or_freespace_err_on(cond, ...) \ + (unlikely(cond) ? need_discard_or_freespace_err(__VA_ARGS__) : false) + static int bch2_bucket_do_index(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c alloc_k, const struct bch_alloc_v4 *a, bool set) { - struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_s_c old; - struct bkey_i *k; enum btree_id btree; - enum bch_bkey_type old_type = !set ? 
KEY_TYPE_set : KEY_TYPE_deleted; - enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted; - struct printbuf buf = PRINTBUF; - int ret; + struct bpos pos; if (a->data_type != BCH_DATA_free && a->data_type != BCH_DATA_need_discard) return 0; - k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k)); - if (IS_ERR(k)) - return PTR_ERR(k); - - bkey_init(&k->k); - k->k.type = new_type; - switch (a->data_type) { case BCH_DATA_free: btree = BTREE_ID_freespace; - k->k.p = alloc_freespace_pos(alloc_k.k->p, *a); - bch2_key_resize(&k->k, 1); + pos = alloc_freespace_pos(alloc_k.k->p, *a); break; case BCH_DATA_need_discard: btree = BTREE_ID_need_discard; - k->k.p = alloc_k.k->p; + pos = alloc_k.k->p; break; default: return 0; } - old = bch2_bkey_get_iter(trans, &iter, btree, - bkey_start_pos(&k->k), - BTREE_ITER_intent); - ret = bkey_err(old); + struct btree_iter iter; + struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent); + int ret = bkey_err(old); if (ret) return ret; - if (ca->mi.freespace_initialized && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info && - bch2_trans_inconsistent_on(old.k->type != old_type, trans, - "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n" - " for %s", - set ? "setting" : "clearing", - bch2_btree_id_str(btree), - iter.pos.inode, - iter.pos.offset, - bch2_bkey_types[old.k->type], - bch2_bkey_types[old_type], - (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { - ret = -EIO; - goto err; - } + need_discard_or_freespace_err_on(ca->mi.freespace_initialized && + !old.k->type != set, + trans, alloc_k, set, + btree == BTREE_ID_need_discard, false); - ret = bch2_trans_update(trans, &iter, k, 0); -err: + ret = bch2_btree_bit_mod_iter(trans, &iter, set); +fsck_err: bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); return ret; } @@ -858,7 +858,10 @@ int bch2_trigger_alloc(struct btree_trans *trans, if (flags & BTREE_TRIGGER_transactional) { alloc_data_type_set(new_a, new_a->data_type); - if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) { + int is_empty_delta = (int) data_type_is_empty(new_a->data_type) - + (int) data_type_is_empty(old_a->data_type); + + if (is_empty_delta < 0) { new_a->io_time[READ] = bch2_current_io_time(c, READ); new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE); SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true); @@ -928,37 +931,55 @@ int bch2_trigger_alloc(struct btree_trans *trans, } if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) { - u64 journal_seq = trans->journal_res.seq; - u64 bucket_journal_seq = new_a->journal_seq; + u64 transaction_seq = trans->journal_res.seq; + BUG_ON(!transaction_seq); - if ((flags & BTREE_TRIGGER_insert) && - data_type_is_empty(old_a->data_type) != - data_type_is_empty(new_a->data_type) && - new.k->type == KEY_TYPE_alloc_v4) { - struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v; + if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq, + trans, alloc_key_journal_seq_in_future, + "bucket journal seq in future (currently at %llu)\n%s", + journal_cur_seq(&c->journal), + (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf))) + new_a->journal_seq_nonempty = transaction_seq; - /* - * If the btree updates referring to a bucket weren't flushed - * before the bucket became empty again, then the we don't have - * to wait on a journal flush before we can reuse the bucket: - */ - v->journal_seq = bucket_journal_seq = - data_type_is_empty(new_a->data_type) && - (journal_seq == v->journal_seq || - 
bch2_journal_noflush_seq(&c->journal, v->journal_seq)) - ? 0 : journal_seq; + int is_empty_delta = (int) data_type_is_empty(new_a->data_type) - + (int) data_type_is_empty(old_a->data_type); + + /* + * Record journal sequence number of empty -> nonempty transition: + * Note that there may be multiple empty -> nonempty + * transitions, data in a bucket may be overwritten while we're + * still writing to it - so be careful to only record the first: + * */ + if (is_empty_delta < 0 && + new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) { + new_a->journal_seq_nonempty = transaction_seq; + new_a->journal_seq_empty = 0; } - if (!data_type_is_empty(old_a->data_type) && - data_type_is_empty(new_a->data_type) && - bucket_journal_seq) { - ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, - c->journal.flushed_seq_ondisk, - new.k->p.inode, new.k->p.offset, - bucket_journal_seq); - if (bch2_fs_fatal_err_on(ret, c, - "setting bucket_needs_journal_commit: %s", bch2_err_str(ret))) - goto err; + /* + * Bucket becomes empty: mark it as waiting for a journal flush, + * unless updates since empty -> nonempty transition were never + * flushed - we may need to ask the journal not to flush + * intermediate sequence numbers: + */ + if (is_empty_delta > 0) { + if (new_a->journal_seq_nonempty == transaction_seq || + bch2_journal_noflush_seq(&c->journal, + new_a->journal_seq_nonempty, + transaction_seq)) { + new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0; + } else { + new_a->journal_seq_empty = transaction_seq; + + ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, + c->journal.flushed_seq_ondisk, + new.k->p.inode, new.k->p.offset, + transaction_seq); + if (bch2_fs_fatal_err_on(ret, c, + "setting bucket_needs_journal_commit: %s", + bch2_err_str(ret))) + goto err; + } } if (new_a->gen != old_a->gen) { @@ -974,7 +995,7 @@ int bch2_trigger_alloc(struct btree_trans *trans, #define eval_state(_a, expr) ({ const struct bch_alloc_v4 *a = _a; expr; }) #define statechange(expr) !eval_state(old_a, expr) && eval_state(new_a, expr) -#define bucket_flushed(a) (!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk) +#define bucket_flushed(a) (a->journal_seq_empty <= c->journal.flushed_seq_ondisk) if (statechange(a->data_type == BCH_DATA_free) && bucket_flushed(new_a)) @@ -1006,6 +1027,7 @@ int bch2_trigger_alloc(struct btree_trans *trans, rcu_read_unlock(); } err: +fsck_err: printbuf_exit(&buf); bch2_dev_put(ca); return ret; @@ -1045,7 +1067,7 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos * btree node min/max is a closed interval, upto takes a half * open interval: */ - k = bch2_btree_iter_peek_upto(&iter2, end); + k = bch2_btree_iter_peek_max(&iter2, end); next = iter2.pos; bch2_trans_iter_exit(iter->trans, &iter2); @@ -1129,7 +1151,6 @@ int bch2_check_alloc_key(struct btree_trans *trans, struct bch_fs *c = trans->c; struct bch_alloc_v4 a_convert; const struct bch_alloc_v4 *a; - unsigned discard_key_type, freespace_key_type; unsigned gens_offset; struct bkey_s_c k; struct printbuf buf = PRINTBUF; @@ -1149,64 +1170,30 @@ int bch2_check_alloc_key(struct btree_trans *trans, a = bch2_alloc_to_v4(alloc_k, &a_convert); - discard_key_type = a->data_type == BCH_DATA_need_discard ? 
KEY_TYPE_set : 0; bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p); k = bch2_btree_iter_peek_slot(discard_iter); ret = bkey_err(k); if (ret) goto err; - if (fsck_err_on(k.k->type != discard_key_type, - trans, need_discard_key_wrong, - "incorrect key in need_discard btree (got %s should be %s)\n" - " %s", - bch2_bkey_types[k.k->type], - bch2_bkey_types[discard_key_type], - (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { - struct bkey_i *update = - bch2_trans_kmalloc(trans, sizeof(*update)); - - ret = PTR_ERR_OR_ZERO(update); - if (ret) - goto err; - - bkey_init(&update->k); - update->k.type = discard_key_type; - update->k.p = discard_iter->pos; - - ret = bch2_trans_update(trans, discard_iter, update, 0); + bool is_discarded = a->data_type == BCH_DATA_need_discard; + if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded, + trans, alloc_k, !is_discarded, true, true)) { + ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded); if (ret) goto err; } - freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0; bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a)); k = bch2_btree_iter_peek_slot(freespace_iter); ret = bkey_err(k); if (ret) goto err; - if (fsck_err_on(k.k->type != freespace_key_type, - trans, freespace_key_wrong, - "incorrect key in freespace btree (got %s should be %s)\n" - " %s", - bch2_bkey_types[k.k->type], - bch2_bkey_types[freespace_key_type], - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { - struct bkey_i *update = - bch2_trans_kmalloc(trans, sizeof(*update)); - - ret = PTR_ERR_OR_ZERO(update); - if (ret) - goto err; - - bkey_init(&update->k); - update->k.type = freespace_key_type; - update->k.p = freespace_iter->pos; - bch2_key_resize(&update->k, 1); - - ret = bch2_trans_update(trans, freespace_iter, update, 0); + bool is_free = a->data_type == BCH_DATA_free; + if (need_discard_or_freespace_err_on(!!k.k->type != is_free, + trans, alloc_k, !is_free, false, true)) { + ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free); if (ret) goto err; } @@ -1368,51 +1355,88 @@ fsck_err: return ret; } -static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans, - struct btree_iter *iter) +struct check_discard_freespace_key_async { + struct work_struct work; + struct bch_fs *c; + struct bbpos pos; +}; + +static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos) +{ + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0); + int ret = bkey_err(k); + if (ret) + return ret; + + u8 gen; + ret = k.k->type != KEY_TYPE_set + ? bch2_check_discard_freespace_key(trans, &iter, &gen, false) + : 0; + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +static void check_discard_freespace_key_work(struct work_struct *work) +{ + struct check_discard_freespace_key_async *w = + container_of(work, struct check_discard_freespace_key_async, work); + + bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos)); + bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key); + kfree(w); +} + +int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen, + bool async_repair) { struct bch_fs *c = trans->c; - struct btree_iter alloc_iter; - struct bkey_s_c alloc_k; - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a; - u64 genbits; - struct bpos pos; enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard ? 
BCH_DATA_need_discard : BCH_DATA_free; struct printbuf buf = PRINTBUF; - int ret; - pos = iter->pos; - pos.offset &= ~(~0ULL << 56); - genbits = iter->pos.offset & (~0ULL << 56); + struct bpos bucket = iter->pos; + bucket.offset &= ~(~0ULL << 56); + u64 genbits = iter->pos.offset & (~0ULL << 56); - alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0); - ret = bkey_err(alloc_k); + struct btree_iter alloc_iter; + struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, + BTREE_ID_alloc, bucket, + async_repair ? BTREE_ITER_cached : 0); + int ret = bkey_err(alloc_k); if (ret) return ret; - if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), - trans, need_discard_freespace_key_to_invalid_dev_bucket, - "entry in %s btree for nonexistant dev:bucket %llu:%llu", - bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset)) - goto delete; + if (!bch2_dev_bucket_exists(c, bucket)) { + if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket, + "entry in %s btree for nonexistant dev:bucket %llu:%llu", + bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset)) + goto delete; + ret = 1; + goto out; + } - a = bch2_alloc_to_v4(alloc_k, &a_convert); + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); + + if (a->data_type != state || + (state == BCH_DATA_free && + genbits != alloc_freespace_genbits(*a))) { + if (fsck_err(trans, need_discard_freespace_key_bad, + "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)", + (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf), + bch2_btree_id_str(iter->btree_id), + iter->pos.inode, + iter->pos.offset, + a->data_type == state, + genbits >> 56, alloc_freespace_genbits(*a) >> 56)) + goto delete; + ret = 1; + goto out; + } - if (fsck_err_on(a->data_type != state || - (state == BCH_DATA_free && - genbits != alloc_freespace_genbits(*a)), - trans, need_discard_freespace_key_bad, - "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)", - (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf), - bch2_btree_id_str(iter->btree_id), - iter->pos.inode, - iter->pos.offset, - a->data_type == state, - genbits >> 56, alloc_freespace_genbits(*a) >> 56)) - goto delete; + *gen = a->gen; out: fsck_err: bch2_set_btree_iter_dontneed(&alloc_iter); @@ -1420,11 +1444,40 @@ fsck_err: printbuf_exit(&buf); return ret; delete: - ret = bch2_btree_delete_extent_at(trans, iter, - iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?: - bch2_trans_commit(trans, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc); - goto out; + if (!async_repair) { + ret = bch2_btree_bit_mod_iter(trans, iter, false) ?: + bch2_trans_commit(trans, NULL, NULL, + BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_commit; + goto out; + } else { + /* + * We can't repair here when called from the allocator path: the + * commit will recurse back into the allocator + */ + struct check_discard_freespace_key_async *w = + kzalloc(sizeof(*w), GFP_KERNEL); + if (!w) + goto out; + + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) { + kfree(w); + goto out; + } + + INIT_WORK(&w->work, check_discard_freespace_key_work); + w->c = c; + w->pos = BBPOS(iter->btree_id, iter->pos); + queue_work(c->write_ref_wq, &w->work); + goto out; + } +} + +static int bch2_check_discard_freespace_key_fsck(struct btree_trans *trans, struct btree_iter *iter) +{ + u8 gen; + int ret = bch2_check_discard_freespace_key(trans, iter, &gen, false); + return ret < 0 ? 
ret : 0; } /* @@ -1581,7 +1634,7 @@ bkey_err: ret = for_each_btree_key(trans, iter, BTREE_ID_need_discard, POS_MIN, BTREE_ITER_prefetch, k, - bch2_check_discard_freespace_key(trans, &iter)); + bch2_check_discard_freespace_key_fsck(trans, &iter)); if (ret) goto err; @@ -1594,7 +1647,7 @@ bkey_err: break; ret = bkey_err(k) ?: - bch2_check_discard_freespace_key(trans, &iter); + bch2_check_discard_freespace_key_fsck(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { ret = 0; continue; @@ -1750,14 +1803,14 @@ struct discard_buckets_state { u64 open; u64 need_journal_commit; u64 discarded; - u64 need_journal_commit_this_dev; }; static int bch2_discard_one_bucket(struct btree_trans *trans, struct bch_dev *ca, struct btree_iter *need_discard_iter, struct bpos *discard_pos_done, - struct discard_buckets_state *s) + struct discard_buckets_state *s, + bool fastpath) { struct bch_fs *c = trans->c; struct bpos pos = need_discard_iter->pos; @@ -1773,11 +1826,11 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, goto out; } - if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, - c->journal.flushed_seq_ondisk, - pos.inode, pos.offset)) { - s->need_journal_commit++; - s->need_journal_commit_this_dev++; + u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, + pos.inode, pos.offset); + if (seq_ready > c->journal.flushed_seq_ondisk) { + if (seq_ready > c->journal.flushing_seq) + s->need_journal_commit++; goto out; } @@ -1793,80 +1846,64 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, if (ret) goto out; - if (bch2_bucket_sectors_total(a->v)) { - if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info, - trans, "attempting to discard bucket with dirty data\n%s", - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = -EIO; - goto out; - } - if (a->v.data_type != BCH_DATA_need_discard) { - if (data_type_is_empty(a->v.data_type) && - BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) { - a->v.gen++; - SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false); - goto write; + if (need_discard_or_freespace_err(trans, k, true, true, true)) { + ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false); + if (ret) + goto out; + goto commit; } - if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info, - trans, "bucket incorrectly set in need_discard btree\n" - "%s", - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = -EIO; goto out; } - if (a->v.journal_seq > c->journal.flushed_seq_ondisk) { - if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info, - trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s", - a->v.journal_seq, - c->journal.flushed_seq_ondisk, - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = -EIO; - goto out; - } - - if (discard_in_flight_add(ca, iter.pos.offset, true)) - goto out; + if (!fastpath) { + if (discard_in_flight_add(ca, iter.pos.offset, true)) + goto out; - discard_locked = true; + discard_locked = true; + } - if (!bkey_eq(*discard_pos_done, iter.pos) && - ca->mi.discard && !c->opts.nochanges) { - /* - * This works without any other locks because this is the only - * thread that removes items from the need_discard tree - */ - bch2_trans_unlock_long(trans); - blkdev_issue_discard(ca->disk_sb.bdev, - k.k->p.offset * ca->mi.bucket_size, - ca->mi.bucket_size, - GFP_KERNEL); + if (!bkey_eq(*discard_pos_done, iter.pos)) { + s->discarded++; *discard_pos_done = iter.pos; - ret = 
bch2_trans_relock_notrace(trans); - if (ret) - goto out; + if (ca->mi.discard && !c->opts.nochanges) { + /* + * This works without any other locks because this is the only + * thread that removes items from the need_discard tree + */ + bch2_trans_unlock_long(trans); + blkdev_issue_discard(ca->disk_sb.bdev, + k.k->p.offset * ca->mi.bucket_size, + ca->mi.bucket_size, + GFP_KERNEL); + ret = bch2_trans_relock_notrace(trans); + if (ret) + goto out; + } } SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false); -write: alloc_data_type_set(&a->v, a->v.data_type); - ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: - bch2_trans_commit(trans, NULL, NULL, - BCH_WATERMARK_btree| - BCH_TRANS_COMMIT_no_enospc); + ret = bch2_trans_update(trans, &iter, &a->k_i, 0); + if (ret) + goto out; +commit: + ret = bch2_trans_commit(trans, NULL, NULL, + BCH_WATERMARK_btree| + BCH_TRANS_COMMIT_no_enospc); if (ret) goto out; count_event(c, bucket_discard); - s->discarded++; out: +fsck_err: if (discard_locked) discard_in_flight_remove(ca, iter.pos.offset); - s->seen++; + if (!ret) + s->seen++; bch2_trans_iter_exit(trans, &iter); printbuf_exit(&buf); return ret; @@ -1886,11 +1923,14 @@ static void bch2_do_discards_work(struct work_struct *work) * successful commit: */ ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, + for_each_btree_key_max(trans, iter, BTREE_ID_need_discard, POS(ca->dev_idx, 0), POS(ca->dev_idx, U64_MAX), 0, k, - bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s))); + bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false))); + + if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal)) + bch2_journal_flush_async(&c->journal, NULL); trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret)); @@ -1923,27 +1963,29 @@ void bch2_do_discards(struct bch_fs *c) bch2_dev_do_discards(ca); } -static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket) +static int bch2_do_discards_fast_one(struct btree_trans *trans, + struct bch_dev *ca, + u64 bucket, + struct bpos *discard_pos_done, + struct discard_buckets_state *s) { - struct btree_iter iter; - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent); - struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter); - int ret = bkey_err(k); + struct btree_iter need_discard_iter; + struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter, + BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); + int ret = bkey_err(discard_k); if (ret) - goto err; - - struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k); - ret = PTR_ERR_OR_ZERO(a); - if (ret) - goto err; + return ret; - BUG_ON(a->v.dirty_sectors); - SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false); - alloc_data_type_set(&a->v, a->v.data_type); + if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set, + trans, discarding_bucket_not_in_need_discard_btree, + "attempting to discard bucket %u:%llu not in need_discard btree", + ca->dev_idx, bucket)) + goto out; - ret = bch2_trans_update(trans, &iter, &a->k_i, 0); -err: - bch2_trans_iter_exit(trans, &iter); + ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); +out: +fsck_err: + bch2_trans_iter_exit(trans, &need_discard_iter); return ret; } @@ -1951,6 +1993,10 @@ static void bch2_do_discards_fast_work(struct work_struct *work) { struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work); struct bch_fs *c = ca->fs; + struct discard_buckets_state s = {}; + struct bpos 
discard_pos_done = POS_MAX; + struct btree_trans *trans = bch2_trans_get(c); + int ret = 0; while (1) { bool got_bucket = false; @@ -1971,16 +2017,8 @@ static void bch2_do_discards_fast_work(struct work_struct *work) if (!got_bucket) break; - if (ca->mi.discard && !c->opts.nochanges) - blkdev_issue_discard(ca->disk_sb.bdev, - bucket_to_sector(ca, bucket), - ca->mi.bucket_size, - GFP_KERNEL); - - int ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_WATERMARK_btree| - BCH_TRANS_COMMIT_no_enospc, - bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket))); + ret = lockrestart_do(trans, + bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s)); bch_err_fn(c, ret); discard_in_flight_remove(ca, bucket); @@ -1989,6 +2027,9 @@ static void bch2_do_discards_fast_work(struct work_struct *work) break; } + trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret)); + + bch2_trans_put(trans); percpu_ref_put(&ca->io_ref); bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast); } @@ -2030,8 +2071,11 @@ static int invalidate_one_bucket(struct btree_trans *trans, return 1; if (!bch2_dev_bucket_exists(c, bucket)) { - prt_str(&buf, "lru entry points to invalid bucket"); - goto err; + if (fsck_err(trans, lru_entry_to_invalid_bucket, + "lru key points to nonexistent device:bucket %llu:%llu", + bucket.inode, bucket.offset)) + return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false); + goto out; } if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset)) @@ -2072,28 +2116,9 @@ static int invalidate_one_bucket(struct btree_trans *trans, trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors); --*nr_to_invalidate; out: +fsck_err: printbuf_exit(&buf); return ret; -err: - prt_str(&buf, "\n lru key: "); - bch2_bkey_val_to_text(&buf, c, lru_k); - - prt_str(&buf, "\n lru entry: "); - bch2_lru_pos_to_text(&buf, lru_iter->pos); - - prt_str(&buf, "\n alloc key: "); - if (!a) - bch2_bpos_to_text(&buf, bucket); - else - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i)); - - bch_err(c, "%s", buf.buf); - if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) { - bch2_inconsistent_error(c); - ret = -EINVAL; - } - - goto out; } static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter, @@ -2101,7 +2126,7 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter { struct bkey_s_c k; again: - k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); + k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); if (!k.k && !*wrapped) { bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0)); *wrapped = true; diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h index 163a67b97a40..de25ba4ee94b 100644 --- a/fs/bcachefs/alloc_background.h +++ b/fs/bcachefs/alloc_background.h @@ -8,8 +8,6 @@ #include "debug.h" #include "super.h" -enum bch_validate_flags; - /* How out of date a pointer gen is allowed to be: */ #define BUCKET_GC_GEN_MAX 96U @@ -245,10 +243,14 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int); -int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); -int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); -int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); -int 
bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_alloc_v4_swab(struct bkey_s); void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); @@ -282,7 +284,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); }) int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \ @@ -307,6 +309,8 @@ int bch2_alloc_key_to_dev_counters(struct btree_trans *, struct bch_dev *, int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); + +int bch2_check_discard_freespace_key(struct btree_trans *, struct btree_iter *, u8 *, bool); int bch2_check_alloc_info(struct bch_fs *); int bch2_check_alloc_to_lru_refs(struct bch_fs *); void bch2_dev_do_discards(struct bch_dev *); diff --git a/fs/bcachefs/alloc_background_format.h b/fs/bcachefs/alloc_background_format.h index befdaa95c515..740238369a5a 100644 --- a/fs/bcachefs/alloc_background_format.h +++ b/fs/bcachefs/alloc_background_format.h @@ -58,7 +58,7 @@ LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2) struct bch_alloc_v4 { struct bch_val v; - __u64 journal_seq; + __u64 journal_seq_nonempty; __u32 flags; __u8 gen; __u8 oldest_gen; @@ -70,7 +70,7 @@ struct bch_alloc_v4 { __u32 stripe; __u32 nr_external_backpointers; /* end of fields in original version of alloc_v4 */ - __u64 _fragmentation_lru; /* obsolete */ + __u64 journal_seq_empty; __u32 stripe_sectors; __u32 pad; } __packed __aligned(8); diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index 372178c8d416..5a781fb4c794 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -107,14 +107,10 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) return; } - percpu_down_read(&c->mark_lock); spin_lock(&ob->lock); - ob->valid = false; ob->data_type = 0; - spin_unlock(&ob->lock); - percpu_up_read(&c->mark_lock); spin_lock(&c->freelist_lock); bch2_open_bucket_hash_remove(c, ob); @@ -156,6 +152,14 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c) return ob; } +static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) +{ + if (c->curr_recovery_pass > BCH_RECOVERY_PASS_trans_mark_dev_sbs) + return false; + + return bch2_is_superblock_bucket(ca, b); +} + static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob) { BUG_ON(c->open_buckets_partial_nr >= @@ -175,20 +179,6 @@ static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob) closure_wake_up(&c->freelist_wait); } -/* _only_ for allocating the journal on a new device: */ -long bch2_bucket_alloc_new_fs(struct bch_dev *ca) -{ - while (ca->new_fs_bucket_idx < ca->mi.nbuckets) { - u64 b = ca->new_fs_bucket_idx++; - - if (!is_superblock_bucket(ca, b) && - (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse))) - return b; - } - - return -1; -} 
- static inline unsigned open_buckets_reserved(enum bch_watermark watermark) { switch (watermark) { @@ -206,33 +196,44 @@ static inline unsigned open_buckets_reserved(enum bch_watermark watermark) } } -static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, - u64 bucket, - enum bch_watermark watermark, - const struct bch_alloc_v4 *a, - struct bucket_alloc_state *s, - struct closure *cl) +static inline bool may_alloc_bucket(struct bch_fs *c, + struct bpos bucket, + struct bucket_alloc_state *s) { - struct open_bucket *ob; - - if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { - s->skipped_nouse++; - return NULL; - } - - if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { + if (bch2_bucket_is_open(c, bucket.inode, bucket.offset)) { s->skipped_open++; - return NULL; + return false; } - if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, - c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) { + u64 journal_seq_ready = + bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal, + bucket.inode, bucket.offset); + if (journal_seq_ready > c->journal.flushed_seq_ondisk) { + if (journal_seq_ready > c->journal.flushing_seq) + s->need_journal_commit++; s->skipped_need_journal_commit++; - return NULL; + return false; } - if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) { + if (bch2_bucket_nocow_is_locked(&c->nocow_locks, bucket)) { s->skipped_nocow++; + return false; + } + + return true; +} + +static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, + u64 bucket, u8 gen, + enum bch_watermark watermark, + struct bucket_alloc_state *s, + struct closure *cl) +{ + if (unlikely(is_superblock_bucket(c, ca, bucket))) + return NULL; + + if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { + s->skipped_nouse++; return NULL; } @@ -254,14 +255,13 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * return NULL; } - ob = bch2_open_bucket_alloc(c); + struct open_bucket *ob = bch2_open_bucket_alloc(c); spin_lock(&ob->lock); - ob->valid = true; ob->sectors_free = ca->mi.bucket_size; ob->dev = ca->dev_idx; - ob->gen = a->gen; + ob->gen = gen; ob->bucket = bucket; spin_unlock(&ob->lock); @@ -276,111 +276,29 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * } static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, - enum bch_watermark watermark, u64 free_entry, + enum bch_watermark watermark, struct bucket_alloc_state *s, - struct bkey_s_c freespace_k, + struct btree_iter *freespace_iter, struct closure *cl) { struct bch_fs *c = trans->c; - struct btree_iter iter = { NULL }; - struct bkey_s_c k; - struct open_bucket *ob; - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a; - u64 b = free_entry & ~(~0ULL << 56); - unsigned genbits = free_entry >> 56; - struct printbuf buf = PRINTBUF; - int ret; - - if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) { - prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n" - " freespace key ", - ca->mi.first_bucket, ca->mi.nbuckets); - bch2_bkey_val_to_text(&buf, c, freespace_k); - bch2_trans_inconsistent(trans, "%s", buf.buf); - ob = ERR_PTR(-EIO); - goto err; - } + u64 b = freespace_iter->pos.offset & ~(~0ULL << 56); - k = bch2_bkey_get_iter(trans, &iter, - BTREE_ID_alloc, POS(ca->dev_idx, b), - BTREE_ITER_cached); - ret = bkey_err(k); - if (ret) { - ob = ERR_PTR(ret); - goto err; - } - - a = 
bch2_alloc_to_v4(k, &a_convert); - - if (a->data_type != BCH_DATA_free) { - if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) { - ob = NULL; - goto err; - } - - prt_printf(&buf, "non free bucket in freespace btree\n" - " freespace key "); - bch2_bkey_val_to_text(&buf, c, freespace_k); - prt_printf(&buf, "\n "); - bch2_bkey_val_to_text(&buf, c, k); - bch2_trans_inconsistent(trans, "%s", buf.buf); - ob = ERR_PTR(-EIO); - goto err; - } - - if (genbits != (alloc_freespace_genbits(*a) >> 56) && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) { - prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" - " freespace key ", - genbits, alloc_freespace_genbits(*a) >> 56); - bch2_bkey_val_to_text(&buf, c, freespace_k); - prt_printf(&buf, "\n "); - bch2_bkey_val_to_text(&buf, c, k); - bch2_trans_inconsistent(trans, "%s", buf.buf); - ob = ERR_PTR(-EIO); - goto err; - } - - if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) { - struct bch_backpointer bp; - struct bpos bp_pos = POS_MIN; - - ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1, - &bp_pos, &bp, - BTREE_ITER_nopreserve); - if (ret) { - ob = ERR_PTR(ret); - goto err; - } + if (!may_alloc_bucket(c, POS(ca->dev_idx, b), s)) + return NULL; - if (!bkey_eq(bp_pos, POS_MAX)) { - /* - * Bucket may have data in it - we don't call - * bc2h_trans_inconnsistent() because fsck hasn't - * finished yet - */ - ob = NULL; - goto err; - } - } + u8 gen; + int ret = bch2_check_discard_freespace_key(trans, freespace_iter, &gen, true); + if (ret < 0) + return ERR_PTR(ret); + if (ret) + return NULL; - ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl); - if (!ob) - bch2_set_btree_iter_dontneed(&iter); -err: - if (iter.path) - bch2_set_btree_iter_dontneed(&iter); - bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); - return ob; + return __try_alloc_bucket(c, ca, b, gen, watermark, s, cl); } /* * This path is for before the freespace btree is initialized: - * - * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock & - * journal buckets - journal buckets will be < ca->new_fs_bucket_idx */ static noinline struct open_bucket * bch2_bucket_alloc_early(struct btree_trans *trans, @@ -389,10 +307,11 @@ bch2_bucket_alloc_early(struct btree_trans *trans, struct bucket_alloc_state *s, struct closure *cl) { + struct bch_fs *c = trans->c; struct btree_iter iter, citer; struct bkey_s_c k, ck; struct open_bucket *ob = NULL; - u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx); + u64 first_bucket = ca->mi.first_bucket; u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap]; u64 alloc_start = max(first_bucket, *dev_alloc_cursor); u64 alloc_cursor = alloc_start; @@ -415,10 +334,6 @@ again: if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) break; - if (ca->new_fs_bucket_idx && - is_superblock_bucket(ca, k.k->p.offset)) - continue; - if (s->btree_bitmap != BTREE_BITMAP_ANY && s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { @@ -452,7 +367,10 @@ again: s->buckets_seen++; - ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl); + ob = may_alloc_bucket(c, k.k->p, s) + ? 
__try_alloc_bucket(c, ca, k.k->p.offset, a->gen, + watermark, s, cl) + : NULL; next: bch2_set_btree_iter_dontneed(&citer); bch2_trans_iter_exit(trans, &citer); @@ -489,20 +407,21 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor)); u64 alloc_cursor = alloc_start; int ret; - - BUG_ON(ca->new_fs_bucket_idx); again: - for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace, - POS(ca->dev_idx, alloc_cursor), 0, k, ret) { - if (k.k->p.inode != ca->dev_idx) - break; + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_freespace, + POS(ca->dev_idx, alloc_cursor), + POS(ca->dev_idx, U64_MAX), + 0, k, ret) { + /* + * peek normally dosen't trim extents - they can span iter.pos, + * which is not what we want here: + */ + iter.k.size = iter.k.p.offset - iter.pos.offset; - for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k)); - alloc_cursor < k.k->p.offset; - alloc_cursor++) { + while (iter.k.size) { s->buckets_seen++; - u64 bucket = alloc_cursor & ~(~0ULL << 56); + u64 bucket = iter.pos.offset & ~(~0ULL << 56); if (s->btree_bitmap != BTREE_BITMAP_ANY && s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { @@ -511,32 +430,36 @@ again: goto fail; bucket = sector_to_bucket(ca, - round_up(bucket_to_sector(ca, bucket) + 1, + round_up(bucket_to_sector(ca, bucket + 1), 1ULL << ca->mi.btree_bitmap_shift)); - u64 genbits = alloc_cursor >> 56; - alloc_cursor = bucket | (genbits << 56); + alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56)); - if (alloc_cursor > k.k->p.offset) - bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor)); + bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor)); s->skipped_mi_btree_bitmap++; - continue; + goto next; } - ob = try_alloc_bucket(trans, ca, watermark, - alloc_cursor, s, k, cl); + ob = try_alloc_bucket(trans, ca, watermark, s, &iter, cl); if (ob) { + if (!IS_ERR(ob)) + *dev_alloc_cursor = iter.pos.offset; bch2_set_btree_iter_dontneed(&iter); break; } - } + iter.k.size--; + iter.pos.offset++; + } +next: if (ob || ret) break; } fail: bch2_trans_iter_exit(trans, &iter); - if (!ob && ret) + BUG_ON(ob && ret); + + if (ret) ob = ERR_PTR(ret); if (!ob && alloc_start > ca->mi.first_bucket) { @@ -544,8 +467,6 @@ fail: goto again; } - *dev_alloc_cursor = alloc_cursor; - return ob; } @@ -595,6 +516,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca, * @watermark: how important is this allocation? * @data_type: BCH_DATA_journal, btree, user... * @cl: if not NULL, closure to be used to wait if buckets not available + * @nowait: if true, do not wait for buckets to become available * @usage: for secondarily also returning the current device usage * * Returns: an open_bucket on success, or an ERR_PTR() on failure. @@ -629,6 +551,10 @@ again: bch2_dev_do_invalidates(ca); if (!avail) { + if (watermark > BCH_WATERMARK_normal && + c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) + goto alloc; + if (cl && !waiting) { closure_wait(&c->freelist_wait, cl); waiting = true; @@ -648,7 +574,7 @@ alloc: ? 
bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl) : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl); - if (s.skipped_need_journal_commit * 2 > avail) + if (s.need_journal_commit * 2 > avail) bch2_journal_flush_async(&c->journal, NULL); if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) { @@ -711,9 +637,9 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c, unsigned i; for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX) - ret.devs[ret.nr++] = i; + ret.data[ret.nr++] = i; - bubble_sort(ret.devs, ret.nr, dev_stripe_cmp); + bubble_sort(ret.data, ret.nr, dev_stripe_cmp); return ret; } @@ -785,18 +711,13 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans, struct closure *cl) { struct bch_fs *c = trans->c; - struct dev_alloc_list devs_sorted = - bch2_dev_alloc_list(c, stripe, devs_may_alloc); int ret = -BCH_ERR_insufficient_devices; BUG_ON(*nr_effective >= nr_replicas); - for (unsigned i = 0; i < devs_sorted.nr; i++) { - struct bch_dev_usage usage; - struct open_bucket *ob; - - unsigned dev = devs_sorted.devs[i]; - struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev); + struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc); + darray_for_each(devs_sorted, i) { + struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i); if (!ca) continue; @@ -805,8 +726,9 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans, continue; } - ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, - cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage); + struct bch_dev_usage usage; + struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, + cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage); if (!IS_ERR(ob)) bch2_dev_stripe_increment_inlined(ca, stripe, &usage); bch2_dev_put(ca); @@ -850,10 +772,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans, struct closure *cl) { struct bch_fs *c = trans->c; - struct dev_alloc_list devs_sorted; - struct ec_stripe_head *h; - struct open_bucket *ob; - unsigned i, ec_idx; int ret = 0; if (nr_replicas < 2) @@ -862,34 +780,32 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans, if (ec_open_bucket(c, ptrs)) return 0; - h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl); + struct ec_stripe_head *h = + bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl); if (IS_ERR(h)) return PTR_ERR(h); if (!h) return 0; - devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc); - - for (i = 0; i < devs_sorted.nr; i++) - for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) { + struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc); + darray_for_each(devs_sorted, i) + for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) { if (!h->s->blocks[ec_idx]) continue; - ob = c->open_buckets + h->s->blocks[ec_idx]; - if (ob->dev == devs_sorted.devs[i] && - !test_and_set_bit(ec_idx, h->s->blocks_allocated)) - goto got_bucket; + struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx]; + if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) { + ob->ec_idx = ec_idx; + ob->ec = h->s; + ec_stripe_new_get(h->s, STRIPE_REF_io); + + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, ob); + goto out; + } } - goto out_put_head; -got_bucket: - ob->ec_idx = ec_idx; - ob->ec = h->s; - ec_stripe_new_get(h->s, STRIPE_REF_io); - - ret = add_new_bucket(c, ptrs, devs_may_alloc, - nr_replicas, nr_effective, - have_cache, ob); -out_put_head: +out: bch2_ec_stripe_head_put(c, h); 
return ret; } diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h index 1a16fd5bd4f8..f25481a0d1a0 100644 --- a/fs/bcachefs/alloc_foreground.h +++ b/fs/bcachefs/alloc_foreground.h @@ -20,7 +20,7 @@ void bch2_reset_alloc_cursors(struct bch_fs *); struct dev_alloc_list { unsigned nr; - u8 devs[BCH_SB_MEMBERS_MAX]; + u8 data[BCH_SB_MEMBERS_MAX]; }; struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *, @@ -28,8 +28,6 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *, struct bch_devs_mask *); void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *); -long bch2_bucket_alloc_new_fs(struct bch_dev *); - static inline struct bch_dev *ob_dev(struct bch_fs *c, struct open_bucket *ob) { return bch2_dev_have_ref(c, ob->dev); diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h index 9bbb28e90b93..4aa8ee026cb8 100644 --- a/fs/bcachefs/alloc_types.h +++ b/fs/bcachefs/alloc_types.h @@ -18,6 +18,7 @@ struct bucket_alloc_state { u64 buckets_seen; u64 skipped_open; u64 skipped_need_journal_commit; + u64 need_journal_commit; u64 skipped_nocow; u64 skipped_nouse; u64 skipped_mi_btree_bitmap; diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c index 654a58132a4d..ebeb6a5ff9d2 100644 --- a/fs/bcachefs/backpointers.c +++ b/fs/bcachefs/backpointers.c @@ -14,42 +14,8 @@ #include <linux/mm.h> -static bool extent_matches_bp(struct bch_fs *c, - enum btree_id btree_id, unsigned level, - struct bkey_s_c k, - struct bpos bucket, - struct bch_backpointer bp) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry; - struct extent_ptr_decoded p; - - rcu_read_lock(); - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - struct bpos bucket2; - struct bch_backpointer bp2; - - if (p.ptr.cached) - continue; - - struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev); - if (!ca) - continue; - - bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, &bucket2, &bp2); - if (bpos_eq(bucket, bucket2) && - !memcmp(&bp, &bp2, sizeof(bp))) { - rcu_read_unlock(); - return true; - } - } - rcu_read_unlock(); - - return false; -} - int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); int ret = 0; @@ -59,67 +25,70 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, "backpointer level bad: %u >= %u", bp.v->level, BTREE_MAX_DEPTH); - rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode); - if (!ca) { - /* these will be caught by fsck */ - rcu_read_unlock(); - return 0; - } - - struct bpos bucket = bp_pos_to_bucket(ca, bp.k->p); - struct bpos bp_pos = bucket_pos_to_bp_noerror(ca, bucket, bp.v->bucket_offset); - rcu_read_unlock(); - - bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size || - !bpos_eq(bp.k->p, bp_pos), - c, backpointer_bucket_offset_wrong, - "backpointer bucket_offset wrong"); + bkey_fsck_err_on(bp.k->p.inode == BCH_SB_MEMBER_INVALID, + c, backpointer_dev_bad, + "backpointer for BCH_SB_MEMBER_INVALID"); fsck_err: return ret; } -void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp) +void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { - prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=", - bch2_btree_id_str(bp->btree_id), - bp->level, - (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT), - (u32) 
bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT), - bp->bucket_len); - bch2_bpos_to_text(out, bp->pos); -} + struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); -void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) -{ rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode); if (ca) { - struct bpos bucket = bp_pos_to_bucket(ca, k.k->p); + u32 bucket_offset; + struct bpos bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset); rcu_read_unlock(); - prt_str(out, "bucket="); - bch2_bpos_to_text(out, bucket); - prt_str(out, " "); + prt_printf(out, "bucket=%llu:%llu:%u ", bucket.inode, bucket.offset, bucket_offset); } else { rcu_read_unlock(); + prt_printf(out, "sector=%llu:%llu ", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT); } - bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v); + bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level); + prt_printf(out, " suboffset=%u len=%u gen=%u pos=", + (u32) bp.k->p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT), + bp.v->bucket_len, + bp.v->bucket_gen); + bch2_bpos_to_text(out, bp.v->pos); } void bch2_backpointer_swab(struct bkey_s k) { struct bkey_s_backpointer bp = bkey_s_to_backpointer(k); - bp.v->bucket_offset = swab40(bp.v->bucket_offset); bp.v->bucket_len = swab32(bp.v->bucket_len); bch2_bpos_swab(&bp.v->pos); } +static bool extent_matches_bp(struct bch_fs *c, + enum btree_id btree_id, unsigned level, + struct bkey_s_c k, + struct bkey_s_c_backpointer bp) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + struct bkey_i_backpointer bp2; + bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp2); + + if (bpos_eq(bp.k->p, bp2.k.p) && + !memcmp(bp.v, &bp2.v, sizeof(bp2.v))) + return true; + } + + return false; +} + static noinline int backpointer_mod_err(struct btree_trans *trans, - struct bch_backpointer bp, - struct bkey_s_c bp_k, struct bkey_s_c orig_k, + struct bkey_i_backpointer *new_bp, + struct bkey_s_c found_bp, bool insert) { struct bch_fs *c = trans->c; @@ -127,12 +96,12 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, if (insert) { prt_printf(&buf, "existing backpointer found when inserting "); - bch2_backpointer_to_text(&buf, &bp); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i)); prt_newline(&buf); printbuf_indent_add(&buf, 2); prt_printf(&buf, "found "); - bch2_bkey_val_to_text(&buf, c, bp_k); + bch2_bkey_val_to_text(&buf, c, found_bp); prt_newline(&buf); prt_printf(&buf, "for "); @@ -144,11 +113,11 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, printbuf_indent_add(&buf, 2); prt_printf(&buf, "searching for "); - bch2_backpointer_to_text(&buf, &bp); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i)); prt_newline(&buf); prt_printf(&buf, "got "); - bch2_bkey_val_to_text(&buf, c, bp_k); + bch2_bkey_val_to_text(&buf, c, found_bp); prt_newline(&buf); prt_printf(&buf, "for "); @@ -167,161 +136,118 @@ static noinline int backpointer_mod_err(struct btree_trans *trans, } int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, - struct bch_dev *ca, - struct bpos bucket, - struct bch_backpointer bp, struct bkey_s_c orig_k, + struct bkey_i_backpointer *bp, bool insert) { struct btree_iter bp_iter; - struct bkey_s_c k; - struct bkey_i_backpointer 
*bp_k; - int ret; - - bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer)); - ret = PTR_ERR_OR_ZERO(bp_k); - if (ret) - return ret; - - bkey_backpointer_init(&bp_k->k_i); - bp_k->k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); - bp_k->v = bp; - - if (!insert) { - bp_k->k.type = KEY_TYPE_deleted; - set_bkey_val_u64s(&bp_k->k, 0); - } - - k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, - bp_k->k.p, + struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, + bp->k.p, BTREE_ITER_intent| BTREE_ITER_slots| BTREE_ITER_with_updates); - ret = bkey_err(k); + int ret = bkey_err(k); if (ret) - goto err; + return ret; if (insert ? k.k->type : (k.k->type != KEY_TYPE_backpointer || - memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) { - ret = backpointer_mod_err(trans, bp, k, orig_k, insert); + memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) { + ret = backpointer_mod_err(trans, orig_k, bp, k, insert); if (ret) goto err; } - ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0); + if (!insert) { + bp->k.type = KEY_TYPE_deleted; + set_bkey_val_u64s(&bp->k, 0); + } + + ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); err: bch2_trans_iter_exit(trans, &bp_iter); return ret; } -/* - * Find the next backpointer >= *bp_offset: - */ -int bch2_get_next_backpointer(struct btree_trans *trans, - struct bch_dev *ca, - struct bpos bucket, int gen, - struct bpos *bp_pos, - struct bch_backpointer *bp, - unsigned iter_flags) +static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos) { - struct bpos bp_end_pos = bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0); - struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL }; - struct bkey_s_c k; - int ret = 0; - - if (bpos_ge(*bp_pos, bp_end_pos)) - goto done; - - if (gen >= 0) { - k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, - bucket, BTREE_ITER_cached|iter_flags); - ret = bkey_err(k); - if (ret) - goto out; - - if (k.k->type != KEY_TYPE_alloc_v4 || - bkey_s_c_to_alloc_v4(k).v->gen != gen) - goto done; - } - - *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(ca, bucket, 0)); - - for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers, - *bp_pos, iter_flags, k, ret) { - if (bpos_ge(k.k->p, bp_end_pos)) - break; + return (likely(!bch2_backpointers_no_use_write_buffer) + ? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos) + : bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0)) ?: + bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); +} - *bp_pos = k.k->p; - *bp = *bkey_s_c_to_backpointer(k).v; - goto out; - } -done: - *bp_pos = SPOS_MAX; -out: - bch2_trans_iter_exit(trans, &bp_iter); - bch2_trans_iter_exit(trans, &alloc_iter); - return ret; +static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans, + struct bkey_s_c visiting_k, + struct bkey_buf *last_flushed) +{ + return likely(!bch2_backpointers_no_use_write_buffer) + ? 
bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed) + : 0; } -static void backpointer_not_found(struct btree_trans *trans, - struct bpos bp_pos, - struct bch_backpointer bp, - struct bkey_s_c k) +static int backpointer_target_not_found(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, + struct bkey_s_c target_k, + struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; + int ret = 0; /* * If we're using the btree write buffer, the backpointer we were * looking at may have already been deleted - failure to find what it * pointed to is not an error: */ - if (likely(!bch2_backpointers_no_use_write_buffer)) - return; - - struct bpos bucket; - if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket)) - return; + ret = last_flushed + ? bch2_backpointers_maybe_flush(trans, bp.s_c, last_flushed) + : 0; + if (ret) + return ret; prt_printf(&buf, "backpointer doesn't match %s it points to:\n ", - bp.level ? "btree node" : "extent"); - prt_printf(&buf, "bucket: "); - bch2_bpos_to_text(&buf, bucket); - prt_printf(&buf, "\n "); + bp.v->level ? "btree node" : "extent"); + bch2_bkey_val_to_text(&buf, c, bp.s_c); - prt_printf(&buf, "backpointer pos: "); - bch2_bpos_to_text(&buf, bp_pos); prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, target_k); - bch2_backpointer_to_text(&buf, &bp); - prt_printf(&buf, "\n "); - bch2_bkey_val_to_text(&buf, c, k); - if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers) - bch_err_ratelimited(c, "%s", buf.buf); - else - bch2_trans_inconsistent(trans, "%s", buf.buf); + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(target_k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + bkey_for_each_ptr_decode(target_k.k, ptrs, p, entry) + if (p.ptr.dev == bp.k->p.inode) { + prt_printf(&buf, "\n "); + struct bkey_i_backpointer bp2; + bch2_extent_ptr_to_bp(c, bp.v->btree_id, bp.v->level, target_k, p, entry, &bp2); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp2.k_i)); + } + if (fsck_err(trans, backpointer_to_missing_ptr, + "%s", buf.buf)) + ret = bch2_backpointer_del(trans, bp.k->p); +fsck_err: printbuf_exit(&buf); + return ret; } struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, struct btree_iter *iter, - struct bpos bp_pos, - struct bch_backpointer bp, - unsigned iter_flags) + unsigned iter_flags, + struct bkey_buf *last_flushed) { - if (likely(!bp.level)) { - struct bch_fs *c = trans->c; + struct bch_fs *c = trans->c; - struct bpos bucket; - if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket)) - return bkey_s_c_err(-EIO); + if (unlikely(bp.v->btree_id >= btree_id_nr_alive(c))) + return bkey_s_c_null; + if (likely(!bp.v->level)) { bch2_trans_node_iter_init(trans, iter, - bp.btree_id, - bp.pos, + bp.v->btree_id, + bp.v->pos, 0, 0, iter_flags); struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); @@ -330,67 +256,64 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, return k; } - if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp)) + if (k.k && + extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp)) return k; bch2_trans_iter_exit(trans, iter); - backpointer_not_found(trans, bp_pos, bp, k); - return bkey_s_c_null; + int ret = backpointer_target_not_found(trans, bp, k, last_flushed); + return ret ? 
bkey_s_c_err(ret) : bkey_s_c_null; } else { - struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp); + struct btree *b = bch2_backpointer_get_node(trans, bp, iter, last_flushed); + if (IS_ERR_OR_NULL(b)) + return ((struct bkey_s_c) { .k = ERR_CAST(b) }); - if (IS_ERR_OR_NULL(b)) { - bch2_trans_iter_exit(trans, iter); - return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null; - } return bkey_i_to_s_c(&b->key); } } struct btree *bch2_backpointer_get_node(struct btree_trans *trans, + struct bkey_s_c_backpointer bp, struct btree_iter *iter, - struct bpos bp_pos, - struct bch_backpointer bp) + struct bkey_buf *last_flushed) { struct bch_fs *c = trans->c; - BUG_ON(!bp.level); - - struct bpos bucket; - if (!bp_pos_to_bucket_nodev(c, bp_pos, &bucket)) - return ERR_PTR(-EIO); + BUG_ON(!bp.v->level); bch2_trans_node_iter_init(trans, iter, - bp.btree_id, - bp.pos, + bp.v->btree_id, + bp.v->pos, 0, - bp.level - 1, + bp.v->level - 1, 0); struct btree *b = bch2_btree_iter_peek_node(iter); if (IS_ERR_OR_NULL(b)) goto err; - BUG_ON(b->c.level != bp.level - 1); + BUG_ON(b->c.level != bp.v->level - 1); - if (extent_matches_bp(c, bp.btree_id, bp.level, - bkey_i_to_s_c(&b->key), - bucket, bp)) + if (extent_matches_bp(c, bp.v->btree_id, bp.v->level, + bkey_i_to_s_c(&b->key), bp)) return b; if (btree_node_will_make_reachable(b)) { b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); } else { - backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key)); - b = NULL; + int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key), last_flushed); + b = ret ? ERR_PTR(ret) : NULL; } err: bch2_trans_iter_exit(trans, iter); return b; } -static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter, - struct bkey_s_c k) +static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, struct bkey_s_c k, + struct bkey_buf *last_flushed) { + if (k.k->type != KEY_TYPE_backpointer) + return 0; + struct bch_fs *c = trans->c; struct btree_iter alloc_iter = { NULL }; struct bkey_s_c alloc_k; @@ -399,10 +322,14 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_ struct bpos bucket; if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) { + ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + if (ret) + goto out; + if (fsck_err(trans, backpointer_to_missing_device, "backpointer for missing device:\n%s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) - ret = bch2_btree_delete_at(trans, bp_iter, 0); + ret = bch2_backpointer_del(trans, k.k->p); goto out; } @@ -411,13 +338,16 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_ if (ret) goto out; - if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, - trans, backpointer_to_missing_alloc, - "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", - alloc_iter.pos.inode, alloc_iter.pos.offset, - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { - ret = bch2_btree_delete_at(trans, bp_iter, 0); - goto out; + if (alloc_k.k->type != KEY_TYPE_alloc_v4) { + ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); + if (ret) + goto out; + + if (fsck_err(trans, backpointer_to_missing_alloc, + "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", + alloc_iter.pos.inode, alloc_iter.pos.offset, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + ret = bch2_backpointer_del(trans, k.k->p); } out: fsck_err: @@ -429,18 +359,24 @@ fsck_err: /* verify that every backpointer has a corresponding alloc key */ int 
bch2_check_btree_backpointers(struct bch_fs *c) { + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers, POS_MIN, 0, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_check_btree_backpointer(trans, &iter, k))); + bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed))); + + bch2_bkey_buf_exit(&last_flushed, c); bch_err_fn(c, ret); return ret; } struct extents_to_bp_state { - struct bpos bucket_start; - struct bpos bucket_end; + struct bpos bp_start; + struct bpos bp_end; struct bkey_buf last_flushed; }; @@ -501,9 +437,13 @@ found: goto err; prt_str(&buf, "extents pointing to same space, but first extent checksum bad:"); - prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree)); + prt_printf(&buf, "\n "); + bch2_btree_id_to_text(&buf, btree); + prt_str(&buf, " "); bch2_bkey_val_to_text(&buf, c, extent); - prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree)); + prt_printf(&buf, "\n "); + bch2_btree_id_to_text(&buf, o_btree); + prt_str(&buf, " "); bch2_bkey_val_to_text(&buf, c, extent2); struct nonce nonce = extent_nonce(extent.k->bversion, p.crc); @@ -524,41 +464,25 @@ err: static int check_bp_exists(struct btree_trans *trans, struct extents_to_bp_state *s, - struct bpos bucket, - struct bch_backpointer bp, + struct bkey_i_backpointer *bp, struct bkey_s_c orig_k) { struct bch_fs *c = trans->c; - struct btree_iter bp_iter = {}; struct btree_iter other_extent_iter = {}; struct printbuf buf = PRINTBUF; - struct bkey_s_c bp_k; - int ret = 0; - struct bch_dev *ca = bch2_dev_bucket_tryget(c, bucket); - if (!ca) { - prt_str(&buf, "extent for nonexistent device:bucket "); - bch2_bpos_to_text(&buf, bucket); - prt_str(&buf, "\n "); - bch2_bkey_val_to_text(&buf, c, orig_k); - bch_err(c, "%s", buf.buf); - ret = -BCH_ERR_fsck_repair_unimplemented; - goto err; - } - - if (bpos_lt(bucket, s->bucket_start) || - bpos_gt(bucket, s->bucket_end)) - goto out; + if (bpos_lt(bp->k.p, s->bp_start) || + bpos_gt(bp->k.p, s->bp_end)) + return 0; - bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, - bucket_pos_to_bp(ca, bucket, bp.bucket_offset), - 0); - ret = bkey_err(bp_k); + struct btree_iter bp_iter; + struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0); + int ret = bkey_err(bp_k); if (ret) goto err; if (bp_k.k->type != KEY_TYPE_backpointer || - memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) { + memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) { ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); if (ret) goto err; @@ -570,7 +494,6 @@ err: fsck_err: bch2_trans_iter_exit(trans, &other_extent_iter); bch2_trans_iter_exit(trans, &bp_iter); - bch2_dev_put(ca); printbuf_exit(&buf); return ret; check_existing_bp: @@ -578,10 +501,10 @@ check_existing_bp: if (bp_k.k->type != KEY_TYPE_backpointer) goto missing; - struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v; + struct bkey_s_c_backpointer other_bp = bkey_s_c_to_backpointer(bp_k); struct bkey_s_c other_extent = - bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0); + bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0, NULL); ret = bkey_err(other_extent); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) ret = 0; @@ -600,19 +523,23 @@ check_existing_bp: bch_err(c, "%s", buf.buf); if (other_extent.k->size <= orig_k.k->size) { - ret = drop_dev_and_update(trans, 
other_bp.btree_id, other_extent, bucket.inode); + ret = drop_dev_and_update(trans, other_bp.v->btree_id, + other_extent, bp->k.p.inode); if (ret) goto err; goto out; } else { - ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode); + ret = drop_dev_and_update(trans, bp->v.btree_id, orig_k, bp->k.p.inode); if (ret) goto err; goto missing; } } - ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode); + ret = check_extent_checksum(trans, + other_bp.v->btree_id, other_extent, + bp->v.btree_id, orig_k, + bp->k.p.inode); if (ret < 0) goto err; if (ret) { @@ -620,7 +547,8 @@ check_existing_bp: goto missing; } - ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode); + ret = check_extent_checksum(trans, bp->v.btree_id, orig_k, + other_bp.v->btree_id, other_extent, bp->k.p.inode); if (ret < 0) goto err; if (ret) { @@ -629,7 +557,7 @@ check_existing_bp: } printbuf_reset(&buf); - prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode); + prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bp->k.p.inode); bch2_bkey_val_to_text(&buf, c, orig_k); prt_str(&buf, "\n "); bch2_bkey_val_to_text(&buf, c, other_extent); @@ -638,21 +566,15 @@ check_existing_bp: goto err; missing: printbuf_reset(&buf); - prt_printf(&buf, "missing backpointer for btree=%s l=%u ", - bch2_btree_id_str(bp.btree_id), bp.level); + prt_str(&buf, "missing backpointer\n for: "); bch2_bkey_val_to_text(&buf, c, orig_k); - prt_printf(&buf, "\n got: "); + prt_printf(&buf, "\n want: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp->k_i)); + prt_printf(&buf, "\n got: "); bch2_bkey_val_to_text(&buf, c, bp_k); - struct bkey_i_backpointer n_bp_k; - bkey_backpointer_init(&n_bp_k.k_i); - n_bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); - n_bp_k.v = bp; - prt_printf(&buf, "\n want: "); - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i)); - if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf)) - ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, orig_k, true); + ret = bch2_bucket_backpointer_mod(trans, orig_k, bp, true); goto out; } @@ -663,31 +585,33 @@ static int check_extent_to_backpointers(struct btree_trans *trans, struct bkey_s_c k) { struct bch_fs *c = trans->c; - struct bkey_ptrs_c ptrs; + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; - int ret; - ptrs = bch2_bkey_ptrs_c(k); bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - struct bpos bucket_pos = POS_MIN; - struct bch_backpointer bp; - if (p.ptr.cached) continue; + if (p.ptr.dev == BCH_SB_MEMBER_INVALID) + continue; + rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev); - if (ca) - bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp); + bool check = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_mismatches); + bool empty = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_empty); rcu_read_unlock(); - if (!ca) - continue; + if (check || empty) { + struct bkey_i_backpointer bp; + bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp); - ret = check_bp_exists(trans, s, bucket_pos, bp, k); - if (ret) - return ret; + int ret = check + ? 
check_bp_exists(trans, s, &bp, k) + : bch2_bucket_backpointer_mod(trans, k, &bp, true); + if (ret) + return ret; + } } return 0; @@ -896,54 +820,330 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, return 0; } +enum alloc_sector_counter { + ALLOC_dirty, + ALLOC_cached, + ALLOC_stripe, + ALLOC_SECTORS_NR +}; + +static enum alloc_sector_counter data_type_to_alloc_counter(enum bch_data_type t) +{ + switch (t) { + case BCH_DATA_btree: + case BCH_DATA_user: + return ALLOC_dirty; + case BCH_DATA_cached: + return ALLOC_cached; + case BCH_DATA_stripe: + return ALLOC_stripe; + default: + BUG(); + } +} + +static int check_bucket_backpointers_to_extents(struct btree_trans *, struct bch_dev *, struct bpos); + +static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct bkey_s_c alloc_k, + struct bkey_buf *last_flushed) +{ + struct bch_fs *c = trans->c; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); + bool need_commit = false; + + if (a->data_type == BCH_DATA_sb || + a->data_type == BCH_DATA_journal || + a->data_type == BCH_DATA_parity) + return 0; + + u32 sectors[ALLOC_SECTORS_NR]; + memset(sectors, 0, sizeof(sectors)); + + struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(trans->c, alloc_k.k->p); + if (!ca) + return 0; + + struct btree_iter iter; + struct bkey_s_c bp_k; + int ret = 0; + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, alloc_k.k->p), + bucket_pos_to_bp_end(ca, alloc_k.k->p), 0, bp_k, ret) { + if (bp_k.k->type != KEY_TYPE_backpointer) + continue; + + struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k); + + if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen && + (bp.v->bucket_gen != a->gen || + bp.v->pad)) { + ret = bch2_backpointer_del(trans, bp_k.k->p); + if (ret) + break; + + need_commit = true; + continue; + } + + if (bp.v->bucket_gen != a->gen) + continue; + + sectors[data_type_to_alloc_counter(bp.v->data_type)] += bp.v->bucket_len; + }; + bch2_trans_iter_exit(trans, &iter); + if (ret) + goto err; + + if (need_commit) { + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); + if (ret) + goto err; + } + + /* Cached pointers don't have backpointers: */ + + if (sectors[ALLOC_dirty] != a->dirty_sectors || + sectors[ALLOC_stripe] != a->stripe_sectors) { + if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) { + ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed); + if (ret) + goto err; + } + + if (sectors[ALLOC_dirty] > a->dirty_sectors || + sectors[ALLOC_stripe] > a->stripe_sectors) { + ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?: + -BCH_ERR_transaction_restart_nested; + goto err; + } + + if (!sectors[ALLOC_dirty] && + !sectors[ALLOC_stripe]) + __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_empty); + else + __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_mismatches); + } +err: + bch2_dev_put(ca); + return ret; +} + +static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k) +{ + switch (k.k->type) { + case KEY_TYPE_btree_ptr_v2: { + bool ret = false; + + rcu_read_lock(); + struct bpos pos = bkey_s_c_to_btree_ptr_v2(k).v->min_key; + while (pos.inode <= k.k->p.inode) { + if (pos.inode >= c->sb.nr_devices) + break; + + struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode); + if (!ca) + goto next; + + struct bpos bucket = bp_pos_to_bucket(ca, pos); + 
bucket.offset = find_next_bit(ca->bucket_backpointer_mismatches, + ca->mi.nbuckets, bucket.offset); + if (bucket.offset == ca->mi.nbuckets) + goto next; + + ret = bpos_le(bucket_pos_to_bp_end(ca, bucket), k.k->p); + if (ret) + break; +next: + pos = SPOS(pos.inode + 1, 0, 0); + } + rcu_read_unlock(); + + return ret; + } + case KEY_TYPE_btree_ptr: + return true; + default: + return false; + } +} + +static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k, + enum btree_id btree, unsigned level) +{ + struct btree_iter iter; + bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0); + struct btree *b = bch2_btree_iter_peek_node(&iter); + int ret = PTR_ERR_OR_ZERO(b); + if (ret) + goto err; + + if (b) + bch2_node_pin(trans->c, b); +err: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans, + struct bpos start, struct bpos *end) +{ + struct bch_fs *c = trans->c; + int ret = 0; + + struct bkey_buf tmp; + bch2_bkey_buf_init(&tmp); + + bch2_btree_cache_unpin(c); + + *end = SPOS_MAX; + + s64 mem_may_pin = mem_may_pin_bytes(c); + struct btree_iter iter; + bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start, + 0, 1, BTREE_ITER_prefetch); + ret = for_each_btree_key_continue(trans, iter, 0, k, ({ + if (!backpointer_node_has_missing(c, k)) + continue; + + mem_may_pin -= c->opts.btree_node_size; + if (mem_may_pin <= 0) + break; + + bch2_bkey_buf_reassemble(&tmp, c, k); + struct btree_path *path = btree_iter_path(trans, &iter); + + BUG_ON(path->level != 1); + + bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, path->level - 1); + })); + if (ret) + return ret; + + struct bpos pinned = SPOS_MAX; + mem_may_pin = mem_may_pin_bytes(c); + bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start, + 0, 1, BTREE_ITER_prefetch); + ret = for_each_btree_key_continue(trans, iter, 0, k, ({ + if (!backpointer_node_has_missing(c, k)) + continue; + + mem_may_pin -= c->opts.btree_node_size; + if (mem_may_pin <= 0) { + *end = pinned; + break; + } + + bch2_bkey_buf_reassemble(&tmp, c, k); + struct btree_path *path = btree_iter_path(trans, &iter); + + BUG_ON(path->level != 1); + + int ret2 = btree_node_get_and_pin(trans, tmp.k, path->btree_id, path->level - 1); + + if (!ret2) + pinned = tmp.k->k.p; + + ret; + })); + if (ret) + return ret; + + return ret; +} + int bch2_check_extents_to_backpointers(struct bch_fs *c) { + int ret = 0; + + /* + * Can't allow devices to come/go/resize while we have bucket bitmaps + * allocated + */ + lockdep_assert_held(&c->state_lock); + + for_each_member_device(c, ca) { + BUG_ON(ca->bucket_backpointer_mismatches); + ca->bucket_backpointer_mismatches = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), + sizeof(unsigned long), + GFP_KERNEL); + ca->bucket_backpointer_empty = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), + sizeof(unsigned long), + GFP_KERNEL); + if (!ca->bucket_backpointer_mismatches || + !ca->bucket_backpointer_empty) { + bch2_dev_put(ca); + ret = -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap; + goto err_free_bitmaps; + } + } + struct btree_trans *trans = bch2_trans_get(c); - struct extents_to_bp_state s = { .bucket_start = POS_MIN }; - int ret; + struct extents_to_bp_state s = { .bp_start = POS_MIN }; bch2_bkey_buf_init(&s.last_flushed); bkey_init(&s.last_flushed.k->k); + ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, + POS_MIN, BTREE_ITER_prefetch, k, ({ + check_bucket_backpointer_mismatch(trans, k, &s.last_flushed); + })); + if (ret) + 
goto err; + + u64 nr_buckets = 0, nr_mismatches = 0, nr_empty = 0; + for_each_member_device(c, ca) { + nr_buckets += ca->mi.nbuckets; + nr_mismatches += bitmap_weight(ca->bucket_backpointer_mismatches, ca->mi.nbuckets); + nr_empty += bitmap_weight(ca->bucket_backpointer_empty, ca->mi.nbuckets); + } + + if (!nr_mismatches && !nr_empty) + goto err; + + bch_info(c, "scanning for missing backpointers in %llu/%llu buckets", + nr_mismatches + nr_empty, nr_buckets); + while (1) { - struct bbpos end; - ret = bch2_get_btree_in_memory_pos(trans, - BIT_ULL(BTREE_ID_backpointers), - BIT_ULL(BTREE_ID_backpointers), - BBPOS(BTREE_ID_backpointers, s.bucket_start), &end); + ret = bch2_pin_backpointer_nodes_with_missing(trans, s.bp_start, &s.bp_end); if (ret) break; - s.bucket_end = end.pos; - - if ( bpos_eq(s.bucket_start, POS_MIN) && - !bpos_eq(s.bucket_end, SPOS_MAX)) + if ( bpos_eq(s.bp_start, POS_MIN) && + !bpos_eq(s.bp_end, SPOS_MAX)) bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass", __func__, btree_nodes_fit_in_ram(c)); - if (!bpos_eq(s.bucket_start, POS_MIN) || - !bpos_eq(s.bucket_end, SPOS_MAX)) { + if (!bpos_eq(s.bp_start, POS_MIN) || + !bpos_eq(s.bp_end, SPOS_MAX)) { struct printbuf buf = PRINTBUF; prt_str(&buf, "check_extents_to_backpointers(): "); - bch2_bpos_to_text(&buf, s.bucket_start); + bch2_bpos_to_text(&buf, s.bp_start); prt_str(&buf, "-"); - bch2_bpos_to_text(&buf, s.bucket_end); + bch2_bpos_to_text(&buf, s.bp_end); bch_verbose(c, "%s", buf.buf); printbuf_exit(&buf); } ret = bch2_check_extents_to_backpointers_pass(trans, &s); - if (ret || bpos_eq(s.bucket_end, SPOS_MAX)) + if (ret || bpos_eq(s.bp_end, SPOS_MAX)) break; - s.bucket_start = bpos_successor(s.bucket_end); + s.bp_start = bpos_successor(s.bp_end); } +err: bch2_trans_put(trans); bch2_bkey_buf_exit(&s.last_flushed, c); - bch2_btree_cache_unpin(c); +err_free_bitmaps: + for_each_member_device(c, ca) { + kvfree(ca->bucket_backpointer_empty); + ca->bucket_backpointer_empty = NULL; + kvfree(ca->bucket_backpointer_mismatches); + ca->bucket_backpointer_mismatches = NULL; + } bch_err_fn(c, ret); return ret; @@ -959,44 +1159,43 @@ static int check_one_backpointer(struct btree_trans *trans, return 0; struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k); - struct bch_fs *c = trans->c; - struct btree_iter iter; struct bbpos pos = bp_to_bbpos(*bp.v); - struct bkey_s_c k; - struct printbuf buf = PRINTBUF; - int ret; if (bbpos_cmp(pos, start) < 0 || bbpos_cmp(pos, end) > 0) return 0; - k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0); - ret = bkey_err(k); + struct btree_iter iter; + struct bkey_s_c k = bch2_backpointer_get_key(trans, bp, &iter, 0, last_flushed); + int ret = bkey_err(k); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) return 0; if (ret) return ret; - if (!k.k) { - ret = bch2_btree_write_buffer_maybe_flush(trans, bp.s_c, last_flushed); - if (ret) - goto out; - - if (fsck_err(trans, backpointer_to_missing_ptr, - "backpointer for missing %s\n %s", - bp.v->level ? 
"btree node" : "extent", - (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) { - ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p); - goto out; - } - } -out: -fsck_err: bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); return ret; } +static int check_bucket_backpointers_to_extents(struct btree_trans *trans, + struct bch_dev *ca, struct bpos bucket) +{ + u32 restart_count = trans->restart_count; + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, bucket), + bucket_pos_to_bp_end(ca, bucket), + 0, k, + check_one_backpointer(trans, BBPOS_MIN, BBPOS_MAX, k, &last_flushed) + ); + + bch2_bkey_buf_exit(&last_flushed, trans->c); + return ret ?: trans_was_restarted(trans, restart_count); +} + static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, struct bbpos start, struct bbpos end) @@ -1009,9 +1208,8 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, bkey_init(&last_flushed.k->k); progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers)); - int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers, - POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ + int ret = for_each_btree_key(trans, iter, BTREE_ID_backpointers, + POS_MIN, BTREE_ITER_prefetch, k, ({ progress_update_iter(trans, &progress, &iter, "backpointers_to_extents"); check_one_backpointer(trans, start, end, k, &last_flushed); })); diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h index 3b29fdf519dd..060dad1521ee 100644 --- a/fs/bcachefs/backpointers.h +++ b/fs/bcachefs/backpointers.h @@ -18,14 +18,14 @@ static inline u64 swab40(u64 x) ((x & 0xff00000000ULL) >> 32)); } -int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags); -void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *); -void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); +int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, + struct bkey_validate_context); +void bch2_backpointer_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_backpointer_swab(struct bkey_s); #define bch2_bkey_ops_backpointer ((struct bkey_ops) { \ .key_validate = bch2_backpointer_validate, \ - .val_to_text = bch2_backpointer_k_to_text, \ + .val_to_text = bch2_backpointer_to_text, \ .swab = bch2_backpointer_swab, \ .min_val_size = 32, \ }) @@ -43,22 +43,24 @@ static inline struct bpos bp_pos_to_bucket(const struct bch_dev *ca, struct bpos return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector)); } +static inline struct bpos bp_pos_to_bucket_and_offset(const struct bch_dev *ca, struct bpos bp_pos, + u32 *bucket_offset) +{ + u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT; + + return POS(bp_pos.inode, sector_to_bucket_and_offset(ca, bucket_sector, bucket_offset)); +} + static inline bool bp_pos_to_bucket_nodev_noerror(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket) { rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, bp_pos.inode); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp_pos.inode); if (ca) *bucket = bp_pos_to_bucket(ca, bp_pos); rcu_read_unlock(); return ca != NULL; } -static inline bool bp_pos_to_bucket_nodev(struct bch_fs *c, struct bpos bp_pos, struct bpos *bucket) -{ - return 
!bch2_fs_inconsistent_on(!bp_pos_to_bucket_nodev_noerror(c, bp_pos, bucket), - c, "backpointer for missing device %llu", bp_pos.inode); -} - static inline struct bpos bucket_pos_to_bp_noerror(const struct bch_dev *ca, struct bpos bucket, u64 bucket_offset) @@ -80,31 +82,35 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_dev *ca, return ret; } -int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bch_dev *, - struct bpos bucket, struct bch_backpointer, struct bkey_s_c, bool); +static inline struct bpos bucket_pos_to_bp_start(const struct bch_dev *ca, struct bpos bucket) +{ + return bucket_pos_to_bp(ca, bucket, 0); +} + +static inline struct bpos bucket_pos_to_bp_end(const struct bch_dev *ca, struct bpos bucket) +{ + return bpos_nosnap_predecessor(bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket), 0)); +} + +int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, + struct bkey_s_c, + struct bkey_i_backpointer *, + bool); static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans, - struct bch_dev *ca, - struct bpos bucket, - struct bch_backpointer bp, struct bkey_s_c orig_k, + struct bkey_i_backpointer *bp, bool insert) { if (unlikely(bch2_backpointers_no_use_write_buffer)) - return bch2_bucket_backpointer_mod_nowritebuffer(trans, ca, bucket, bp, orig_k, insert); - - struct bkey_i_backpointer bp_k; - - bkey_backpointer_init(&bp_k.k_i); - bp_k.k.p = bucket_pos_to_bp(ca, bucket, bp.bucket_offset); - bp_k.v = bp; + return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert); if (!insert) { - bp_k.k.type = KEY_TYPE_deleted; - set_bkey_val_u64s(&bp_k.k, 0); + bp->k.type = KEY_TYPE_deleted; + set_bkey_val_u64s(&bp->k, 0); } - return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k.k_i); + return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp->k_i); } static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k, @@ -134,44 +140,29 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k, } } -static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, +static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bkey_s_c k, struct extent_ptr_decoded p, const union bch_extent_entry *entry, - struct bpos *bucket_pos, struct bch_backpointer *bp, - u64 sectors) + struct bkey_i_backpointer *bp) { - u32 bucket_offset; - *bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset); - *bp = (struct bch_backpointer) { + bkey_backpointer_init(&bp->k_i); + bp->k.p = POS(p.ptr.dev, ((u64) p.ptr.offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset); + bp->v = (struct bch_backpointer) { .btree_id = btree_id, .level = level, .data_type = bch2_bkey_ptr_data_type(k, p, entry), - .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + - p.crc.offset, - .bucket_len = sectors, + .bucket_gen = p.ptr.gen, + .bucket_len = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p), .pos = k.k->p, }; } -static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca, - enum btree_id btree_id, unsigned level, - struct bkey_s_c k, struct extent_ptr_decoded p, - const union bch_extent_entry *entry, - struct bpos *bucket_pos, struct bch_backpointer *bp) -{ - u64 sectors = ptr_disk_sectors(level ? 
btree_sectors(c) : k.k->size, p); - - __bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors); -} - -int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int, - struct bpos *, struct bch_backpointer *, unsigned); -struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct btree_iter *, - struct bpos, struct bch_backpointer, - unsigned); -struct btree *bch2_backpointer_get_node(struct btree_trans *, struct btree_iter *, - struct bpos, struct bch_backpointer); +struct bkey_buf; +struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *, struct bkey_s_c_backpointer, + struct btree_iter *, unsigned, struct bkey_buf *); +struct btree *bch2_backpointer_get_node(struct btree_trans *, struct bkey_s_c_backpointer, + struct btree_iter *, struct bkey_buf *); int bch2_check_btree_backpointers(struct bch_fs *); int bch2_check_extents_to_backpointers(struct bch_fs *); diff --git a/fs/bcachefs/bbpos.h b/fs/bcachefs/bbpos.h index be2edced5213..63abe17f35ea 100644 --- a/fs/bcachefs/bbpos.h +++ b/fs/bcachefs/bbpos.h @@ -29,7 +29,7 @@ static inline struct bbpos bbpos_successor(struct bbpos pos) static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos) { - prt_str(out, bch2_btree_id_str(pos.btree)); + bch2_btree_id_to_text(out, pos.btree); prt_char(out, ':'); bch2_bpos_to_text(out, pos.pos); } diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index e94a83b8113e..161cf2f05d2a 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -205,6 +205,7 @@ #include <linux/zstd.h> #include "bcachefs_format.h" +#include "btree_journal_iter_types.h" #include "disk_accounting_types.h" #include "errcode.h" #include "fifo.h" @@ -293,6 +294,8 @@ do { \ #define bch_info(c, fmt, ...) \ bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__) +#define bch_info_ratelimited(c, fmt, ...) \ + bch2_print_ratelimited(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_notice(c, fmt, ...) \ bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__) #define bch_warn(c, fmt, ...) \ @@ -352,6 +355,12 @@ do { \ bch_info(c, fmt, ##__VA_ARGS__); \ } while (0) +#define bch_verbose_ratelimited(c, fmt, ...) \ +do { \ + if ((c)->opts.verbose) \ + bch_info_ratelimited(c, fmt, ##__VA_ARGS__); \ +} while (0) + #define pr_verbose_init(opts, fmt, ...) \ do { \ if (opt_get(opts, verbose)) \ @@ -538,20 +547,20 @@ struct bch_dev { /* * Buckets: - * Per-bucket arrays are protected by c->mark_lock, bucket_lock and - * gc_gens_lock, for device resize - holding any is sufficient for - * access: Or rcu_read_lock(), but only for dev_ptr_stale(): + * Per-bucket arrays are protected by either rcu_read_lock or + * state_lock, for device resize. 
*/ GENRADIX(struct bucket) buckets_gc; struct bucket_gens __rcu *bucket_gens; u8 *oldest_gen; unsigned long *buckets_nouse; - struct rw_semaphore bucket_lock; + + unsigned long *bucket_backpointer_mismatches; + unsigned long *bucket_backpointer_empty; struct bch_dev_usage __percpu *usage; /* Allocator: */ - u64 new_fs_bucket_idx; u64 alloc_cursor[3]; unsigned nr_open_buckets; @@ -606,6 +615,7 @@ struct bch_dev { x(going_ro) \ x(write_disable_complete) \ x(clean_shutdown) \ + x(recovery_running) \ x(fsck_running) \ x(initial_gc_unfixed) \ x(need_delete_dead_snapshots) \ @@ -650,28 +660,6 @@ struct journal_seq_blacklist_table { } entries[]; }; -struct journal_keys { - /* must match layout in darray_types.h */ - size_t nr, size; - struct journal_key { - u64 journal_seq; - u32 journal_offset; - enum btree_id btree_id:8; - unsigned level:8; - bool allocated; - bool overwritten; - struct bkey_i *k; - } *data; - /* - * Gap buffer: instead of all the empty space in the array being at the - * end of the buffer - from @nr to @size - the empty space is at @gap. - * This means that sequential insertions are O(n) instead of O(n^2). - */ - size_t gap; - atomic_t ref; - bool initial_ref_held; -}; - struct btree_trans_buf { struct btree_trans *trans; }; @@ -680,6 +668,7 @@ struct btree_trans_buf { ((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO }) #define BCH_WRITE_REFS() \ + x(journal) \ x(trans) \ x(write) \ x(promote) \ @@ -692,6 +681,7 @@ struct btree_trans_buf { x(dio_write) \ x(discard) \ x(discard_fast) \ + x(check_discard_freespace_key) \ x(invalidate) \ x(delete_dead_snapshots) \ x(gc_gens) \ @@ -735,6 +725,12 @@ struct bch_fs { struct percpu_ref writes; #endif /* + * Certain operations are only allowed in single threaded mode, during + * recovery, and we want to assert that this is the case: + */ + struct task_struct *recovery_task; + + /* * Analagous to c->writes, for asynchronous ops that don't necessarily * need fs to be read-write */ @@ -764,6 +760,8 @@ struct bch_fs { __uuid_t user_uuid; u16 version; + u16 version_incompat; + u16 version_incompat_allowed; u16 version_min; u16 version_upgrade_complete; @@ -834,9 +832,10 @@ struct bch_fs { struct work_struct btree_interior_update_work; struct workqueue_struct *btree_node_rewrite_worker; - - struct list_head pending_node_rewrites; - struct mutex pending_node_rewrites_lock; + struct list_head btree_node_rewrites; + struct list_head btree_node_rewrites_pending; + spinlock_t btree_node_rewrites_lock; + struct closure_waitlist btree_node_rewrites_wait; /* btree_io.c: */ spinlock_t btree_write_error_lock; @@ -967,8 +966,7 @@ struct bch_fs { struct rhashtable promote_table; mempool_t compression_bounce[2]; - mempool_t compress_workspace[BCH_COMPRESSION_TYPE_NR]; - mempool_t decompress_workspace; + mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR]; size_t zstd_workspace_size; struct crypto_shash *sha256; @@ -1027,6 +1025,7 @@ struct bch_fs { struct list_head vfs_inodes_list; struct mutex vfs_inodes_lock; struct rhashtable vfs_inodes_table; + struct rhltable vfs_inodes_by_inum_table; /* VFS IO PATH - fs-io.c */ struct bio_set writepage_bioset; @@ -1048,10 +1047,12 @@ struct bch_fs { * for signaling to the toplevel code which pass we want to run now. 
*/ enum bch_recovery_pass curr_recovery_pass; + enum bch_recovery_pass next_recovery_pass; /* bitmask of recovery passes that we actually ran */ u64 recovery_passes_complete; /* never rewinds version of curr_recovery_pass */ enum bch_recovery_pass recovery_pass_done; + spinlock_t recovery_pass_lock; struct semaphore online_fsck_mutex; /* DEBUG JUNK */ @@ -1062,9 +1063,6 @@ struct bch_fs { struct btree_node *verify_ondisk; struct mutex verify_lock; - u64 *unused_inode_hints; - unsigned inode_shard_bits; - /* * A btree node on disk could have too many bsets for an iterator to fit * on the stack - have to dynamically allocate them @@ -1086,8 +1084,6 @@ struct bch_fs { u64 counters_on_mount[BCH_COUNTER_NR]; u64 __percpu *counters; - unsigned copy_gc_enabled:1; - struct bch2_time_stats times[BCH_TIME_STAT_NR]; struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR]; diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index 5004f6ba997c..f70f0108401f 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -418,7 +418,8 @@ static inline void bkey_init(struct bkey *k) x(snapshot_tree, 31) \ x(logged_op_truncate, 32) \ x(logged_op_finsert, 33) \ - x(accounting, 34) + x(accounting, 34) \ + x(inode_alloc_cursor, 35) enum bch_bkey_type { #define x(name, nr) KEY_TYPE_##name = nr, @@ -463,7 +464,8 @@ struct bch_backpointer { __u8 btree_id; __u8 level; __u8 data_type; - __u64 bucket_offset:40; + __u8 bucket_gen; + __u32 pad; __u32 bucket_len; struct bpos pos; } __packed __aligned(8); @@ -499,8 +501,6 @@ struct bch_sb_field { #include "disk_groups_format.h" #include "extents_format.h" #include "ec_format.h" -#include "dirent_format.h" -#include "disk_groups_format.h" #include "inode_format.h" #include "journal_seq_blacklist_format.h" #include "logged_ops_format.h" @@ -679,7 +679,14 @@ struct bch_sb_field_ext { x(disk_accounting_v3, BCH_VERSION(1, 10)) \ x(disk_accounting_inum, BCH_VERSION(1, 11)) \ x(rebalance_work_acct_fix, BCH_VERSION(1, 12)) \ - x(inode_has_child_snapshots, BCH_VERSION(1, 13)) + x(inode_has_child_snapshots, BCH_VERSION(1, 13)) \ + x(backpointer_bucket_gen, BCH_VERSION(1, 14)) \ + x(disk_accounting_big_endian, BCH_VERSION(1, 15)) \ + x(reflink_p_may_update_opts, BCH_VERSION(1, 16)) \ + x(inode_depth, BCH_VERSION(1, 17)) \ + x(persistent_inode_cursors, BCH_VERSION(1, 18)) \ + x(autofix_errors, BCH_VERSION(1, 19)) \ + x(directory_size, BCH_VERSION(1, 20)) enum bcachefs_metadata_version { bcachefs_metadata_version_min = 9, @@ -844,6 +851,10 @@ LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE, struct bch_sb, flags[5], 0, 16); LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT, struct bch_sb, flags[5], 16, 32); +LE64_BITMASK(BCH_SB_VERSION_INCOMPAT, struct bch_sb, flags[5], 32, 48); +LE64_BITMASK(BCH_SB_VERSION_INCOMPAT_ALLOWED, + struct bch_sb, flags[5], 48, 64); +LE64_BITMASK(BCH_SB_SHARD_INUMS_NBITS, struct bch_sb, flags[6], 0, 4); static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb) { @@ -896,21 +907,22 @@ static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u x(new_varint, 15) \ x(journal_no_flush, 16) \ x(alloc_v2, 17) \ - x(extents_across_btree_nodes, 18) + x(extents_across_btree_nodes, 18) \ + x(incompat_version_field, 19) #define BCH_SB_FEATURES_ALWAYS \ - ((1ULL << BCH_FEATURE_new_extent_overwrite)| \ - (1ULL << BCH_FEATURE_extents_above_btree_updates)|\ - (1ULL << BCH_FEATURE_btree_updates_journalled)|\ - (1ULL << BCH_FEATURE_alloc_v2)|\ - (1ULL << 
BCH_FEATURE_extents_across_btree_nodes)) + (BIT_ULL(BCH_FEATURE_new_extent_overwrite)| \ + BIT_ULL(BCH_FEATURE_extents_above_btree_updates)|\ + BIT_ULL(BCH_FEATURE_btree_updates_journalled)|\ + BIT_ULL(BCH_FEATURE_alloc_v2)|\ + BIT_ULL(BCH_FEATURE_extents_across_btree_nodes)) #define BCH_SB_FEATURES_ALL \ (BCH_SB_FEATURES_ALWAYS| \ - (1ULL << BCH_FEATURE_new_siphash)| \ - (1ULL << BCH_FEATURE_btree_ptr_v2)| \ - (1ULL << BCH_FEATURE_new_varint)| \ - (1ULL << BCH_FEATURE_journal_no_flush)) + BIT_ULL(BCH_FEATURE_new_siphash)| \ + BIT_ULL(BCH_FEATURE_btree_ptr_v2)| \ + BIT_ULL(BCH_FEATURE_new_varint)| \ + BIT_ULL(BCH_FEATURE_journal_no_flush)) enum bch_sb_feature { #define x(f, n) BCH_FEATURE_##f, @@ -1032,7 +1044,7 @@ static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type) x(crc64, 2) \ x(xxhash, 3) -enum bch_csum_opts { +enum bch_csum_opt { #define x(t, n) BCH_CSUM_OPT_##t = n, BCH_CSUM_OPTS() #undef x @@ -1221,6 +1233,15 @@ struct jset_entry_log { u8 d[]; } __packed __aligned(8); +static inline unsigned jset_entry_log_msg_bytes(struct jset_entry_log *l) +{ + unsigned b = vstruct_bytes(&l->entry) - offsetof(struct jset_entry_log, d); + + while (b && !l->d[b - 1]) + --b; + return b; +} + struct jset_entry_datetime { struct jset_entry entry; __le64 seconds; @@ -1268,14 +1289,18 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6); /* Btree: */ enum btree_id_flags { - BTREE_ID_EXTENTS = BIT(0), - BTREE_ID_SNAPSHOTS = BIT(1), - BTREE_ID_SNAPSHOT_FIELD = BIT(2), - BTREE_ID_DATA = BIT(3), + BTREE_IS_extents = BIT(0), + BTREE_IS_snapshots = BIT(1), + BTREE_IS_snapshot_field = BIT(2), + BTREE_IS_data = BIT(3), + BTREE_IS_write_buffer = BIT(4), }; #define BCH_BTREE_IDS() \ - x(extents, 0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\ + x(extents, 0, \ + BTREE_IS_extents| \ + BTREE_IS_snapshots| \ + BTREE_IS_data, \ BIT_ULL(KEY_TYPE_whiteout)| \ BIT_ULL(KEY_TYPE_error)| \ BIT_ULL(KEY_TYPE_cookie)| \ @@ -1283,17 +1308,20 @@ enum btree_id_flags { BIT_ULL(KEY_TYPE_reservation)| \ BIT_ULL(KEY_TYPE_reflink_p)| \ BIT_ULL(KEY_TYPE_inline_data)) \ - x(inodes, 1, BTREE_ID_SNAPSHOTS, \ + x(inodes, 1, \ + BTREE_IS_snapshots, \ BIT_ULL(KEY_TYPE_whiteout)| \ BIT_ULL(KEY_TYPE_inode)| \ BIT_ULL(KEY_TYPE_inode_v2)| \ BIT_ULL(KEY_TYPE_inode_v3)| \ BIT_ULL(KEY_TYPE_inode_generation)) \ - x(dirents, 2, BTREE_ID_SNAPSHOTS, \ + x(dirents, 2, \ + BTREE_IS_snapshots, \ BIT_ULL(KEY_TYPE_whiteout)| \ BIT_ULL(KEY_TYPE_hash_whiteout)| \ BIT_ULL(KEY_TYPE_dirent)) \ - x(xattrs, 3, BTREE_ID_SNAPSHOTS, \ + x(xattrs, 3, \ + BTREE_IS_snapshots, \ BIT_ULL(KEY_TYPE_whiteout)| \ BIT_ULL(KEY_TYPE_cookie)| \ BIT_ULL(KEY_TYPE_hash_whiteout)| \ @@ -1307,7 +1335,9 @@ enum btree_id_flags { BIT_ULL(KEY_TYPE_quota)) \ x(stripes, 6, 0, \ BIT_ULL(KEY_TYPE_stripe)) \ - x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \ + x(reflink, 7, \ + BTREE_IS_extents| \ + BTREE_IS_data, \ BIT_ULL(KEY_TYPE_reflink_v)| \ BIT_ULL(KEY_TYPE_indirect_inline_data)| \ BIT_ULL(KEY_TYPE_error)) \ @@ -1315,28 +1345,38 @@ enum btree_id_flags { BIT_ULL(KEY_TYPE_subvolume)) \ x(snapshots, 9, 0, \ BIT_ULL(KEY_TYPE_snapshot)) \ - x(lru, 10, 0, \ + x(lru, 10, \ + BTREE_IS_write_buffer, \ BIT_ULL(KEY_TYPE_set)) \ - x(freespace, 11, BTREE_ID_EXTENTS, \ + x(freespace, 11, \ + BTREE_IS_extents, \ BIT_ULL(KEY_TYPE_set)) \ x(need_discard, 12, 0, \ BIT_ULL(KEY_TYPE_set)) \ - x(backpointers, 13, 0, \ + x(backpointers, 13, \ + BTREE_IS_write_buffer, \ BIT_ULL(KEY_TYPE_backpointer)) \ x(bucket_gens, 14, 0, \ BIT_ULL(KEY_TYPE_bucket_gens)) \ x(snapshot_trees, 
15, 0, \ BIT_ULL(KEY_TYPE_snapshot_tree)) \ - x(deleted_inodes, 16, BTREE_ID_SNAPSHOT_FIELD, \ + x(deleted_inodes, 16, \ + BTREE_IS_snapshot_field| \ + BTREE_IS_write_buffer, \ BIT_ULL(KEY_TYPE_set)) \ x(logged_ops, 17, 0, \ BIT_ULL(KEY_TYPE_logged_op_truncate)| \ - BIT_ULL(KEY_TYPE_logged_op_finsert)) \ - x(rebalance_work, 18, BTREE_ID_SNAPSHOT_FIELD, \ + BIT_ULL(KEY_TYPE_logged_op_finsert)| \ + BIT_ULL(KEY_TYPE_inode_alloc_cursor)) \ + x(rebalance_work, 18, \ + BTREE_IS_snapshot_field| \ + BTREE_IS_write_buffer, \ BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie)) \ x(subvolume_children, 19, 0, \ BIT_ULL(KEY_TYPE_set)) \ - x(accounting, 20, BTREE_ID_SNAPSHOT_FIELD, \ + x(accounting, 20, \ + BTREE_IS_snapshot_field| \ + BTREE_IS_write_buffer, \ BIT_ULL(KEY_TYPE_accounting)) \ enum btree_id { @@ -1361,6 +1401,8 @@ static inline bool btree_id_is_alloc(enum btree_id id) case BTREE_ID_need_discard: case BTREE_ID_freespace: case BTREE_ID_bucket_gens: + case BTREE_ID_lru: + case BTREE_ID_accounting: return true; default: return false; diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h index 41df24a53d97..054e2d5e8448 100644 --- a/fs/bcachefs/bkey.h +++ b/fs/bcachefs/bkey.h @@ -9,13 +9,6 @@ #include "util.h" #include "vstructs.h" -enum bch_validate_flags { - BCH_VALIDATE_write = BIT(0), - BCH_VALIDATE_commit = BIT(1), - BCH_VALIDATE_journal = BIT(2), - BCH_VALIDATE_silent = BIT(3), -}; - #if 0 /* diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c index e7ac227ba7e8..15c93576b5c2 100644 --- a/fs/bcachefs/bkey_methods.c +++ b/fs/bcachefs/bkey_methods.c @@ -28,7 +28,7 @@ const char * const bch2_bkey_types[] = { }; static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -42,7 +42,7 @@ static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k, }) static int empty_val_key_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -59,7 +59,7 @@ fsck_err: }) static int key_type_cookie_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -83,7 +83,7 @@ static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c, }) static int key_type_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -124,7 +124,7 @@ const struct bkey_ops bch2_bkey_null_ops = { }; int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; @@ -140,7 +140,7 @@ int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k, if (!ops->key_validate) return 0; - ret = ops->key_validate(c, k, flags); + ret = ops->key_validate(c, k, from); fsck_err: return ret; } @@ -161,9 +161,10 @@ const char *bch2_btree_node_type_str(enum btree_node_type type) } int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, - enum btree_node_type type, - enum bch_validate_flags flags) + struct bkey_validate_context from) { + enum btree_node_type type = __btree_node_type(from.level, from.btree); + if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; @@ -177,7 +178,7 @@ int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, return 0; bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX && - (type == BKEY_TYPE_btree || (flags & 
BCH_VALIDATE_commit)) && + (type == BKEY_TYPE_btree || (from.flags & BCH_VALIDATE_commit)) && !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, bkey_invalid_type_for_btree, "invalid key type for btree %s (%s)", @@ -228,15 +229,15 @@ fsck_err: } int bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, - enum btree_node_type type, - enum bch_validate_flags flags) + struct bkey_validate_context from) { - return __bch2_bkey_validate(c, k, type, flags) ?: - bch2_bkey_val_validate(c, k, flags); + return __bch2_bkey_validate(c, k, from) ?: + bch2_bkey_val_validate(c, k, from); } int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b, - struct bkey_s_c k, enum bch_validate_flags flags) + struct bkey_s_c k, + struct bkey_validate_context from) { int ret = 0; diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h index 018fb72e32d3..bf34111cdf00 100644 --- a/fs/bcachefs/bkey_methods.h +++ b/fs/bcachefs/bkey_methods.h @@ -22,7 +22,7 @@ extern const struct bkey_ops bch2_bkey_null_ops; */ struct bkey_ops { int (*key_validate)(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags); + struct bkey_validate_context from); void (*val_to_text)(struct printbuf *, struct bch_fs *, struct bkey_s_c); void (*swab)(struct bkey_s); @@ -48,13 +48,14 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type) : &bch2_bkey_null_ops; } -int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); -int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type, - enum bch_validate_flags); -int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type, - enum bch_validate_flags); +int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context from); void bch2_bpos_to_text(struct printbuf *, struct bpos); void bch2_bkey_to_text(struct printbuf *, const struct bkey *); diff --git a/fs/bcachefs/bkey_types.h b/fs/bcachefs/bkey_types.h index c9ae9e42b385..b4f328f9853c 100644 --- a/fs/bcachefs/bkey_types.h +++ b/fs/bcachefs/bkey_types.h @@ -210,4 +210,32 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\ BCH_BKEY_TYPES(); #undef x +enum bch_validate_flags { + BCH_VALIDATE_write = BIT(0), + BCH_VALIDATE_commit = BIT(1), + BCH_VALIDATE_silent = BIT(2), +}; + +#define BKEY_VALIDATE_CONTEXTS() \ + x(unknown) \ + x(superblock) \ + x(journal) \ + x(btree_root) \ + x(btree_node) \ + x(commit) + +struct bkey_validate_context { + enum { +#define x(n) BKEY_VALIDATE_##n, + BKEY_VALIDATE_CONTEXTS() +#undef x + } from:8; + enum bch_validate_flags flags:8; + u8 level; + enum btree_id btree; + bool root:1; + unsigned journal_offset; + u64 journal_seq; +}; + #endif /* _BCACHEFS_BKEY_TYPES_H */ diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c index 7123019ab3bc..ca755e8d1a37 100644 --- a/fs/bcachefs/btree_cache.c +++ b/fs/bcachefs/btree_cache.c @@ -24,7 +24,10 @@ do { \ } while (0) const char * const bch2_btree_node_flags[] = { -#define x(f) #f, + "typebit", + "typebit", + "typebit", +#define x(f) [BTREE_NODE_##f] = #f, BTREE_FLAGS() #undef x NULL @@ -222,7 +225,6 @@ void bch2_node_pin(struct bch_fs *c, struct btree *b) struct 
btree_cache *bc = &c->btree_cache; mutex_lock(&bc->lock); - BUG_ON(!__btree_node_pinned(bc, b)); if (b != btree_node_root(c, b) && !btree_node_pinned(b)) { set_btree_node_pinned(b); list_move(&b->list, &bc->live[1].list); @@ -326,7 +328,7 @@ void bch2_btree_node_update_key_early(struct btree_trans *trans, if (!IS_ERR_OR_NULL(b)) { mutex_lock(&c->btree_cache.lock); - bch2_btree_node_hash_remove(&c->btree_cache, b); + __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, new); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); @@ -1004,16 +1006,14 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) return; prt_printf(&buf, - "btree node header doesn't match ptr\n" - "btree %s level %u\n" - "ptr: ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + "btree node header doesn't match ptr: "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, "\nptr: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - prt_printf(&buf, "\nheader: btree %s level %llu\n" - "min ", - bch2_btree_id_str(BTREE_NODE_ID(b->data)), - BTREE_NODE_LEVEL(b->data)); + prt_str(&buf, "\nheader: "); + bch2_btree_id_level_to_text(&buf, BTREE_NODE_ID(b->data), BTREE_NODE_LEVEL(b->data)); + prt_str(&buf, "\nmin "); bch2_bpos_to_text(&buf, b->data->min_key); prt_printf(&buf, "\nmax "); @@ -1133,7 +1133,7 @@ retry: if (unlikely(btree_node_read_error(b))) { six_unlock_type(&b->c.lock, lock_type); - return ERR_PTR(-BCH_ERR_btree_node_read_error); + return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); } EBUG_ON(b->c.btree_id != path->btree_id); @@ -1223,7 +1223,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path * if (unlikely(btree_node_read_error(b))) { six_unlock_type(&b->c.lock, lock_type); - return ERR_PTR(-BCH_ERR_btree_node_read_error); + return ERR_PTR(-BCH_ERR_btree_node_read_err_cached); } EBUG_ON(b->c.btree_id != path->btree_id); @@ -1305,7 +1305,7 @@ lock_node: if (unlikely(btree_node_read_error(b))) { six_unlock_read(&b->c.lock); - b = ERR_PTR(-BCH_ERR_btree_node_read_error); + b = ERR_PTR(-BCH_ERR_btree_node_read_err_cached); goto out; } @@ -1398,13 +1398,31 @@ void bch2_btree_id_to_text(struct printbuf *out, enum btree_id btree) prt_printf(out, "(unknown btree %u)", btree); } +void bch2_btree_id_level_to_text(struct printbuf *out, enum btree_id btree, unsigned level) +{ + prt_str(out, "btree="); + bch2_btree_id_to_text(out, btree); + prt_printf(out, " level=%u", level); +} + +void __bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, + enum btree_id btree, unsigned level, struct bkey_s_c k) +{ + bch2_btree_id_to_text(out, btree); + prt_printf(out, " level %u/", level); + struct btree_root *r = bch2_btree_id_root(c, btree); + if (r) + prt_printf(out, "%u", r->level); + else + prt_printf(out, "(unknown)"); + prt_printf(out, "\n "); + + bch2_bkey_val_to_text(out, c, k); +} + void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) { - prt_printf(out, "%s level %u/%u\n ", - bch2_btree_id_str(b->c.btree_id), - b->c.level, - bch2_btree_id_root(c, b->c.btree_id)->level); - bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key)); + __bch2_btree_pos_to_text(out, c, b->c.btree_id, b->c.level, bkey_i_to_s_c(&b->key)); } void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b) @@ -1478,8 +1496,12 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); 
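	/*
	 * Editor's note, illustrative only -- not part of this patch.  This
	 * hunk (and several others below) replaces bch2_btree_id_str() with
	 * the printbuf-based bch2_btree_id_to_text() and
	 * bch2_btree_id_level_to_text() helpers.  Assuming only functions
	 * named in this diff, an error message is now assembled roughly like:
	 *
	 *	struct printbuf buf = PRINTBUF;
	 *
	 *	bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
	 *	prt_str(&buf, " pos ");
	 *	bch2_bpos_to_text(&buf, b->key.k.p);
	 *	bch_err(c, "%s", buf.buf);
	 *	printbuf_exit(&buf);
	 *
	 * which prints e.g. "btree=extents level=1 pos ...".
	 */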
prt_newline(out); - for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) - prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]); + for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) { + bch2_btree_id_to_text(out, i); + prt_printf(out, "\t"); + prt_human_readable_u64(out, bc->nr_by_btree[i] * c->opts.btree_node_size); + prt_printf(out, " (%zu)\n", bc->nr_by_btree[i]); + } prt_newline(out); prt_printf(out, "freed:\t%zu\n", bc->nr_freed); diff --git a/fs/bcachefs/btree_cache.h b/fs/bcachefs/btree_cache.h index 66e86d1a178d..ca3c1b145330 100644 --- a/fs/bcachefs/btree_cache.h +++ b/fs/bcachefs/btree_cache.h @@ -128,19 +128,27 @@ static inline struct btree_root *bch2_btree_id_root(struct bch_fs *c, unsigned i } else { unsigned idx = id - BTREE_ID_NR; - EBUG_ON(idx >= c->btree_roots_extra.nr); + /* This can happen when we're called from btree_node_scan */ + if (idx >= c->btree_roots_extra.nr) + return NULL; + return &c->btree_roots_extra.data[idx]; } } static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b) { - return bch2_btree_id_root(c, b->c.btree_id)->b; + struct btree_root *r = bch2_btree_id_root(c, b->c.btree_id); + + return r ? r->b : NULL; } -const char *bch2_btree_id_str(enum btree_id); +const char *bch2_btree_id_str(enum btree_id); /* avoid */ void bch2_btree_id_to_text(struct printbuf *, enum btree_id); +void bch2_btree_id_level_to_text(struct printbuf *, enum btree_id, unsigned); +void __bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, + enum btree_id, unsigned, struct bkey_s_c); void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *); void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *); void bch2_btree_cache_to_text(struct printbuf *, const struct btree_cache *); diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c index 81dcf9e512c0..dd1d9b74076e 100644 --- a/fs/bcachefs/btree_gc.c +++ b/fs/bcachefs/btree_gc.c @@ -29,6 +29,7 @@ #include "move.h" #include "recovery_passes.h" #include "reflink.h" +#include "recovery.h" #include "replicas.h" #include "super-io.h" #include "trace.h" @@ -56,8 +57,8 @@ void bch2_gc_pos_to_text(struct printbuf *out, struct gc_pos *p) { prt_str(out, bch2_gc_phase_strs[p->phase]); prt_char(out, ' '); - bch2_btree_id_to_text(out, p->btree); - prt_printf(out, " l=%u ", p->level); + bch2_btree_id_level_to_text(out, p->btree, p->level); + prt_char(out, ' '); bch2_bpos_to_text(out, p->pos); } @@ -209,8 +210,9 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree * if (bpos_eq(expected_start, cur->data->min_key)) return 0; - prt_printf(&buf, " at btree %s level %u:\n parent: ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_printf(&buf, " at "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_printf(&buf, ":\n parent: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); if (prev) { @@ -277,8 +279,9 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b, if (bpos_eq(child->key.k.p, b->key.k.p)) return 0; - prt_printf(&buf, "at btree %s level %u:\n parent: ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_printf(&buf, " at "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_printf(&buf, ":\n parent: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); prt_str(&buf, "\n child: "); @@ -341,14 +344,14 @@ again: ret = PTR_ERR_OR_ZERO(cur); printbuf_reset(&buf); + bch2_btree_id_level_to_text(&buf, 
b->c.btree_id, b->c.level - 1); + prt_char(&buf, ' '); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k)); if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), - trans, btree_node_unreadable, - "Topology repair: unreadable btree node at btree %s level %u:\n" + trans, btree_node_read_error, + "Topology repair: unreadable btree node at\n" " %s", - bch2_btree_id_str(b->c.btree_id), - b->c.level - 1, buf.buf)) { bch2_btree_node_evict(trans, cur_k.k); cur = NULL; @@ -357,11 +360,9 @@ again: if (ret) break; - if (!btree_id_is_alloc(b->c.btree_id)) { - ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes); - if (ret) - break; - } + ret = bch2_btree_lost_data(c, b->c.btree_id); + if (ret) + break; continue; } @@ -370,7 +371,7 @@ again: break; if (bch2_btree_node_is_stale(c, cur)) { - bch_info(c, "btree node %s older than nodes found by scanning", buf.buf); + bch_info(c, "btree node older than nodes found by scanning\n %s", buf.buf); six_unlock_read(&cur->c.lock); bch2_btree_node_evict(trans, cur_k.k); ret = bch2_journal_key_delete(c, b->c.btree_id, @@ -478,14 +479,13 @@ again: } printbuf_reset(&buf); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_newline(&buf); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); if (mustfix_fsck_err_on(!have_child, trans, btree_node_topology_interior_node_empty, - "empty interior btree node at btree %s level %u\n" - " %s", - bch2_btree_id_str(b->c.btree_id), - b->c.level, buf.buf)) + "empty interior btree node at %s", buf.buf)) ret = DROP_THIS_NODE; err: fsck_err: @@ -511,6 +511,7 @@ int bch2_check_topology(struct bch_fs *c) { struct btree_trans *trans = bch2_trans_get(c); struct bpos pulled_from_scan = POS_MIN; + struct printbuf buf = PRINTBUF; int ret = 0; bch2_trans_srcu_unlock(trans); @@ -519,19 +520,22 @@ int bch2_check_topology(struct bch_fs *c) struct btree_root *r = bch2_btree_id_root(c, i); bool reconstructed_root = false; + printbuf_reset(&buf); + bch2_btree_id_to_text(&buf, i); + if (r->error) { - ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes); + ret = bch2_btree_lost_data(c, i); if (ret) break; reconstruct_root: - bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i)); + bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf); r->alive = false; r->error = 0; if (!bch2_btree_has_scanned_nodes(c, i)) { mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing, - "no nodes found for btree %s, continue?", bch2_btree_id_str(i)); + "no nodes found for btree %s, continue?", buf.buf); bch2_btree_root_alloc_fake_trans(trans, i, 0); } else { bch2_btree_root_alloc_fake_trans(trans, i, 1); @@ -560,13 +564,14 @@ reconstruct_root: if (!reconstructed_root) goto reconstruct_root; - bch_err(c, "empty btree root %s", bch2_btree_id_str(i)); + bch_err(c, "empty btree root %s", buf.buf); bch2_btree_root_alloc_fake_trans(trans, i, 0); r->alive = false; ret = 0; } } fsck_err: + printbuf_exit(&buf); bch2_trans_put(trans); return ret; } @@ -713,6 +718,7 @@ static int bch2_gc_btrees(struct bch_fs *c) { struct btree_trans *trans = bch2_trans_get(c); enum btree_id ids[BTREE_ID_NR]; + struct printbuf buf = PRINTBUF; unsigned i; int ret = 0; @@ -727,14 +733,9 @@ static int bch2_gc_btrees(struct bch_fs *c) continue; ret = bch2_gc_btree(trans, btree, true); - - if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), - trans, btree_node_read_error, - "btree node read error for %s", - bch2_btree_id_str(btree))) - ret = 
bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); } -fsck_err: + + printbuf_exit(&buf); bch2_trans_put(trans); bch_err_fn(c, ret); return ret; @@ -802,7 +803,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans, old = bch2_alloc_to_v4(k, &old_convert); gc = new = *old; - percpu_down_read(&c->mark_lock); __bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset)); old_gc = gc; @@ -813,7 +813,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans, gc.data_type = old->data_type; gc.dirty_sectors = old->dirty_sectors; } - percpu_up_read(&c->mark_lock); /* * gc.data_type doesn't yet include need_discard & need_gc_gen states - @@ -831,11 +830,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans, * safe w.r.t. transaction restarts, so fixup the gc_bucket so * we don't run it twice: */ - percpu_down_read(&c->mark_lock); struct bucket *gc_m = gc_bucket(ca, iter->pos.offset); gc_m->data_type = gc.data_type; gc_m->dirty_sectors = gc.dirty_sectors; - percpu_up_read(&c->mark_lock); } if (fsck_err_on(new.data_type != gc.data_type, @@ -895,11 +892,11 @@ static int bch2_gc_alloc_done(struct bch_fs *c) for_each_member_device(c, ca) { ret = bch2_trans_run(c, - for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc, + for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, ca->mi.first_bucket), POS(ca->dev_idx, ca->mi.nbuckets - 1), BTREE_ITER_slots|BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_lazy_rw, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, bch2_alloc_write_key(trans, &iter, ca, k))); if (ret) { bch2_dev_put(ca); @@ -928,98 +925,6 @@ static int bch2_gc_alloc_start(struct bch_fs *c) return ret; } -static int bch2_gc_write_reflink_key(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_s_c k, - size_t *idx) -{ - struct bch_fs *c = trans->c; - const __le64 *refcount = bkey_refcount_c(k); - struct printbuf buf = PRINTBUF; - struct reflink_gc *r; - int ret = 0; - - if (!refcount) - return 0; - - while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) && - r->offset < k.k->p.offset) - ++*idx; - - if (!r || - r->offset != k.k->p.offset || - r->size != k.k->size) { - bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); - return -EINVAL; - } - - if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), - trans, reflink_v_refcount_wrong, - "reflink key has wrong refcount:\n" - " %s\n" - " should be %u", - (bch2_bkey_val_to_text(&buf, c, k), buf.buf), - r->refcount)) { - struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); - ret = PTR_ERR_OR_ZERO(new); - if (ret) - goto out; - - if (!r->refcount) - new->k.type = KEY_TYPE_deleted; - else - *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); - ret = bch2_trans_update(trans, iter, new, 0); - } -out: -fsck_err: - printbuf_exit(&buf); - return ret; -} - -static int bch2_gc_reflink_done(struct bch_fs *c) -{ - size_t idx = 0; - - int ret = bch2_trans_run(c, - for_each_btree_key_commit(trans, iter, - BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_gc_write_reflink_key(trans, &iter, k, &idx))); - c->reflink_gc_nr = 0; - return ret; -} - -static int bch2_gc_reflink_start(struct bch_fs *c) -{ - c->reflink_gc_nr = 0; - - int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, - BTREE_ITER_prefetch, k, ({ - const __le64 *refcount = bkey_refcount_c(k); - - if (!refcount) - continue; - - struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table, - 
c->reflink_gc_nr++, GFP_KERNEL); - if (!r) { - ret = -BCH_ERR_ENOMEM_gc_reflink_start; - break; - } - - r->offset = k.k->p.offset; - r->size = k.k->size; - r->refcount = 0; - 0; - }))); - - bch_err_fn(c, ret); - return ret; -} - static int bch2_gc_write_stripes_key(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) @@ -1171,7 +1076,6 @@ static int gc_btree_gens_key(struct btree_trans *trans, if (unlikely(test_bit(BCH_FS_going_ro, &c->flags))) return -EROFS; - percpu_down_read(&c->mark_lock); rcu_read_lock(); bkey_for_each_ptr(ptrs, ptr) { struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev); @@ -1180,7 +1084,6 @@ static int gc_btree_gens_key(struct btree_trans *trans, if (dev_ptr_stale(ca, ptr) > 16) { rcu_read_unlock(); - percpu_up_read(&c->mark_lock); goto update; } } @@ -1195,7 +1098,6 @@ static int gc_btree_gens_key(struct btree_trans *trans, *gen = ptr->gen; } rcu_read_unlock(); - percpu_up_read(&c->mark_lock); return 0; update: u = bch2_bkey_make_mut(trans, iter, &k, 0); @@ -1224,7 +1126,6 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev return ret; a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset]; - alloc_data_type_set(&a_mut->v, a_mut->v.data_type); return bch2_trans_update(trans, iter, &a_mut->k_i, 0); } @@ -1337,9 +1238,16 @@ void bch2_gc_gens_async(struct bch_fs *c) bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens); } -void bch2_fs_gc_init(struct bch_fs *c) +void bch2_fs_btree_gc_exit(struct bch_fs *c) { - seqcount_init(&c->gc_pos_lock); +} +int bch2_fs_btree_gc_init(struct bch_fs *c) +{ + seqcount_init(&c->gc_pos_lock); INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work); + + init_rwsem(&c->gc_lock); + mutex_init(&c->gc_gens_lock); + return 0; } diff --git a/fs/bcachefs/btree_gc.h b/fs/bcachefs/btree_gc.h index 8a47e8bd0791..9693a90a48a2 100644 --- a/fs/bcachefs/btree_gc.h +++ b/fs/bcachefs/btree_gc.h @@ -82,6 +82,8 @@ void bch2_gc_pos_to_text(struct printbuf *, struct gc_pos *); int bch2_gc_gens(struct bch_fs *); void bch2_gc_gens_async(struct bch_fs *); -void bch2_fs_gc_init(struct bch_fs *); + +void bch2_fs_btree_gc_exit(struct bch_fs *); +int bch2_fs_btree_gc_init(struct bch_fs *); #endif /* _BCACHEFS_BTREE_GC_H */ diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index 839d68802e42..e371e60e3133 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -25,9 +25,8 @@ static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn) { - prt_printf(out, "btree=%s l=%u seq %llux\n", - bch2_btree_id_str(BTREE_NODE_ID(bn)), - (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq); + bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn)); + prt_printf(out, " seq %llx %llu\n", bn->keys.seq, BTREE_NODE_SEQ(bn)); prt_str(out, "min: "); bch2_bpos_to_text(out, bn->min_key); prt_newline(out); @@ -490,8 +489,8 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b) if (b->nsets == MAX_BSETS && !btree_node_write_in_flight(b) && should_compact_all(c, b)) { - bch2_btree_node_write(c, b, SIX_LOCK_write, - BTREE_WRITE_init_next_bset); + bch2_btree_node_write_trans(trans, b, SIX_LOCK_write, + BTREE_WRITE_init_next_bset); reinit_iter = true; } @@ -832,13 +831,32 @@ fsck_err: return ret; } +static int btree_node_bkey_val_validate(struct bch_fs *c, struct btree *b, + struct bkey_s_c k, + enum bch_validate_flags flags) +{ + return bch2_bkey_val_validate(c, k, (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + 
.flags = flags + }); +} + static int bset_key_validate(struct bch_fs *c, struct btree *b, struct bkey_s_c k, - bool updated_range, int rw) + bool updated_range, + enum bch_validate_flags flags) { - return __bch2_bkey_validate(c, k, btree_node_type(b), 0) ?: - (!updated_range ? bch2_bkey_in_btree_node(c, b, k, 0) : 0) ?: - (rw == WRITE ? bch2_bkey_val_validate(c, k, 0) : 0); + struct bkey_validate_context from = (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = flags, + }; + return __bch2_bkey_validate(c, k, from) ?: + (!updated_range ? bch2_bkey_in_btree_node(c, b, k, from) : 0) ?: + (flags & BCH_VALIDATE_write ? btree_node_bkey_val_validate(c, b, k, flags) : 0); } static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, @@ -855,7 +873,21 @@ static bool bkey_packed_valid(struct bch_fs *c, struct btree *b, struct bkey tmp; struct bkey_s u = __bkey_disassemble(b, k, &tmp); - return !__bch2_bkey_validate(c, u.s_c, btree_node_type(b), BCH_VALIDATE_silent); + return !__bch2_bkey_validate(c, u.s_c, + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_silent + }); +} + +static inline int btree_node_read_bkey_cmp(const struct btree *b, + const struct bkey_packed *l, + const struct bkey_packed *r) +{ + return bch2_bkey_cmp_packed(b, l, r) + ?: (int) bkey_deleted(r) - (int) bkey_deleted(l); } static int validate_bset_keys(struct bch_fs *c, struct btree *b, @@ -918,7 +950,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b, BSET_BIG_ENDIAN(i), write, &b->format, k); - if (prev && bkey_iter_cmp(b, prev, k) > 0) { + if (prev && btree_node_read_bkey_cmp(b, prev, k) >= 0) { struct bkey up = bkey_unpack_key(b, prev); printbuf_reset(&buf); @@ -965,6 +997,7 @@ drop_this_key: got_good_key: le16_add_cpu(&i->u64s, -next_good_key); memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k); + set_btree_node_need_rewrite(b); } fsck_err: printbuf_exit(&buf); @@ -1038,39 +1071,51 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, while (b->written < (ptr_written ?: btree_sectors(c))) { unsigned sectors; - struct nonce nonce; bool first = !b->written; - bool csum_bad; - if (!b->written) { + if (first) { + bne = NULL; i = &b->data->keys; + } else { + bne = write_block(b); + i = &bne->keys; - btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_unknown_csum, - "unknown checksum type %llu", BSET_CSUM_TYPE(i)); - - nonce = btree_nonce(i, b->written << 9); + if (i->seq != b->data->keys.seq) + break; + } - struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); - csum_bad = bch2_crc_cmp(b->data->csum, csum); - if (csum_bad) - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + struct nonce nonce = btree_nonce(i, b->written << 9); + bool good_csum_type = bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)); - btree_err_on(csum_bad, - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_bad_csum, - "%s", - (printbuf_reset(&buf), - bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), - buf.buf)); - - ret = bset_encrypt(c, i, b->written << 9); - if (bch2_fs_fatal_err_on(ret, c, - "decrypting btree node: %s", bch2_err_str(ret))) - goto fsck_err; + btree_err_on(!good_csum_type, + bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) + ? 
-BCH_ERR_btree_node_read_err_must_retry + : -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_unknown_csum, + "unknown checksum type %llu", BSET_CSUM_TYPE(i)); + + if (first) { + if (good_csum_type) { + struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data); + bool csum_bad = bch2_crc_cmp(b->data->csum, csum); + if (csum_bad) + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + + btree_err_on(csum_bad, + -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_bad_csum, + "%s", + (printbuf_reset(&buf), + bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum), + buf.buf)); + + ret = bset_encrypt(c, i, b->written << 9); + if (bch2_fs_fatal_err_on(ret, c, + "decrypting btree node: %s", bch2_err_str(ret))) + goto fsck_err; + } btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), @@ -1081,37 +1126,26 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, sectors = vstruct_sectors(b->data, c->block_bits); } else { - bne = write_block(b); - i = &bne->keys; - - if (i->seq != b->data->keys.seq) - break; - - btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_unknown_csum, - "unknown checksum type %llu", BSET_CSUM_TYPE(i)); - - nonce = btree_nonce(i, b->written << 9); - struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); - csum_bad = bch2_crc_cmp(bne->csum, csum); - if (ca && csum_bad) - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); - - btree_err_on(csum_bad, - -BCH_ERR_btree_node_read_err_want_retry, - c, ca, b, i, NULL, - bset_bad_csum, - "%s", - (printbuf_reset(&buf), - bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), - buf.buf)); - - ret = bset_encrypt(c, i, b->written << 9); - if (bch2_fs_fatal_err_on(ret, c, - "decrypting btree node: %s", bch2_err_str(ret))) - goto fsck_err; + if (good_csum_type) { + struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne); + bool csum_bad = bch2_crc_cmp(bne->csum, csum); + if (ca && csum_bad) + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + + btree_err_on(csum_bad, + -BCH_ERR_btree_node_read_err_want_retry, + c, ca, b, i, NULL, + bset_bad_csum, + "%s", + (printbuf_reset(&buf), + bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum), + buf.buf)); + + ret = bset_encrypt(c, i, b->written << 9); + if (bch2_fs_fatal_err_on(ret, c, + "decrypting btree node: %s", bch2_err_str(ret))) + goto fsck_err; + } sectors = vstruct_sectors(bne, c->block_bits); } @@ -1216,7 +1250,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, struct bkey tmp; struct bkey_s u = __bkey_disassemble(b, k, &tmp); - ret = bch2_bkey_val_validate(c, u.s_c, READ); + ret = btree_node_bkey_val_validate(c, b, u.s_c, READ); if (ret == -BCH_ERR_fsck_delete_bkey || (bch2_inject_invalid_keys && !bversion_cmp(u.k->bversion, MAX_VERSION))) { @@ -1226,6 +1260,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, memmove_u64s_down(k, bkey_p_next(k), (u64 *) vstruct_end(i) - (u64 *) k); set_btree_bset_end(b, b->set); + set_btree_node_need_rewrite(b); continue; } if (ret) @@ -1339,13 +1374,18 @@ start: rb->start_time); bio_put(&rb->bio); - if (saw_error && + if ((saw_error || + btree_node_need_rewrite(b)) && !btree_node_read_error(b) && c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) { - printbuf_reset(&buf); - bch2_bpos_to_text(&buf, b->key.k.p); - bch_err_ratelimited(c, "%s: rewriting btree node at 
btree=%s level=%u %s due to error", - __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf); + if (saw_error) { + printbuf_reset(&buf); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, " "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + bch_err_ratelimited(c, "%s: rewriting btree node at due to error\n %s", + __func__, buf.buf); + } bch2_btree_node_rewrite_async(c, b); } @@ -1933,7 +1973,12 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b, bool saw_error; int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key), - BKEY_TYPE_btree, WRITE); + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level + 1, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_write, + }); if (ret) { bch2_fs_inconsistent(c, "invalid btree node key before write"); return ret; @@ -2300,6 +2345,34 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b, } } +void bch2_btree_node_write_trans(struct btree_trans *trans, struct btree *b, + enum six_lock_type lock_type_held, + unsigned flags) +{ + struct bch_fs *c = trans->c; + + if (lock_type_held == SIX_LOCK_intent || + (lock_type_held == SIX_LOCK_read && + six_lock_tryupgrade(&b->c.lock))) { + __bch2_btree_node_write(c, b, flags); + + /* don't cycle lock unnecessarily: */ + if (btree_node_just_written(b) && + six_trylock_write(&b->c.lock)) { + bch2_btree_post_write_cleanup(c, b); + __bch2_btree_node_unlock_write(trans, b); + } + + if (lock_type_held == SIX_LOCK_read) + six_lock_downgrade(&b->c.lock); + } else { + __bch2_btree_node_write(c, b, flags); + if (lock_type_held == SIX_LOCK_write && + btree_node_just_written(b)) + bch2_btree_post_write_cleanup(c, b); + } +} + static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag) { struct bucket_table *tbl; diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h index 9b01ca3de907..6f9e4a6dacf7 100644 --- a/fs/bcachefs/btree_io.h +++ b/fs/bcachefs/btree_io.h @@ -144,11 +144,13 @@ enum btree_write_flags { void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned); void bch2_btree_node_write(struct bch_fs *, struct btree *, enum six_lock_type, unsigned); +void bch2_btree_node_write_trans(struct btree_trans *, struct btree *, + enum six_lock_type, unsigned); -static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b, +static inline void btree_node_write_if_need(struct btree_trans *trans, struct btree *b, enum six_lock_type lock_held) { - bch2_btree_node_write(c, b, lock_held, BTREE_WRITE_ONLY_IF_NEED); + bch2_btree_node_write_trans(trans, b, lock_held, BTREE_WRITE_ONLY_IF_NEED); } bool bch2_btree_flush_all_reads(struct bch_fs *); diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c index eef9b89c561d..e32fce4fd258 100644 --- a/fs/bcachefs/btree_iter.c +++ b/fs/bcachefs/btree_iter.c @@ -270,8 +270,10 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) && iter->pos.snapshot != iter->snapshot); - BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || - bkey_gt(iter->pos, iter->k.p)); + BUG_ON(iter->flags & BTREE_ITER_all_snapshots ? !bpos_eq(iter->pos, iter->k.p) : + !(iter->flags & BTREE_ITER_is_extents) ? 
!bkey_eq(iter->pos, iter->k.p) : + (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) || + bkey_gt(iter->pos, iter->k.p))); } static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) @@ -327,7 +329,7 @@ out: void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id, struct bpos pos) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); struct btree_path *path; struct trans_for_each_path_inorder_iter iter; @@ -697,6 +699,19 @@ void bch2_trans_node_add(struct btree_trans *trans, bch2_trans_revalidate_updates_in_node(trans, b); } +void bch2_trans_node_drop(struct btree_trans *trans, + struct btree *b) +{ + struct btree_path *path; + unsigned i, level = b->c.level; + + trans_for_each_path(trans, path, i) + if (path->l[level].b == b) { + btree_node_unlock(trans, path, level); + path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init); + } +} + /* * A btree node has been modified in such a way as to invalidate iterators - fix * them: @@ -720,7 +735,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans, unsigned long trace_ip) { struct bch_fs *c = trans->c; - struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b; + struct btree_root *r = bch2_btree_id_root(c, path->btree_id); enum six_lock_type lock_type; unsigned i; int ret; @@ -728,7 +743,12 @@ static inline int btree_path_lock_root(struct btree_trans *trans, EBUG_ON(path->nodes_locked); while (1) { - b = READ_ONCE(*rootp); + struct btree *b = READ_ONCE(r->b); + if (unlikely(!b)) { + BUG_ON(!r->error); + return r->error; + } + path->level = READ_ONCE(b->c.level); if (unlikely(path->level < depth_want)) { @@ -748,14 +768,12 @@ static inline int btree_path_lock_root(struct btree_trans *trans, ret = btree_node_lock(trans, path, &b->c, path->level, lock_type, trace_ip); if (unlikely(ret)) { - if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed)) - continue; if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) return ret; BUG(); } - if (likely(b == READ_ONCE(*rootp) && + if (likely(b == READ_ONCE(r->b) && b->c.level == path->level && !race_fault())) { for (i = 0; i < path->level; i++) @@ -825,6 +843,8 @@ static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *p bch2_bkey_buf_init(&tmp); + jiter->fail_if_too_many_whiteouts = true; + while (nr-- && !ret) { if (!bch2_btree_node_relock(trans, path, path->level)) break; @@ -1000,7 +1020,7 @@ retry_all: bch2_trans_unlock(trans); cond_resched(); - trans_set_locked(trans); + trans_set_locked(trans, false); if (unlikely(trans->memory_allocation_failure)) { struct closure cl; @@ -1267,7 +1287,7 @@ __bch2_btree_path_set_pos(struct btree_trans *trans, { int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); EBUG_ON(!trans->paths[path_idx].ref); trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos); @@ -1427,17 +1447,31 @@ void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_ (void *) trans->last_begin_ip); } -void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) +static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans) { +#ifdef CONFIG_BCACHEFS_DEBUG + struct printbuf buf = PRINTBUF; + bch2_prt_backtrace(&buf, &trans->last_restarted_trace); + panic("in transaction restart: %s, last restarted by\n%s", + bch2_err_str(trans->restarted), + buf.buf); +#else panic("in transaction restart: %s, last 
restarted by %pS\n", bch2_err_str(trans->restarted), (void *) trans->last_restarted_ip); +#endif } -void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans) +void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans) { - panic("trans should be locked, unlocked by %pS\n", - (void *) trans->last_unlock_ip); + if (trans->restarted) + bch2_trans_in_restart_error(trans); + + if (!trans->locked) + panic("trans should be locked, unlocked by %pS\n", + (void *) trans->last_unlock_ip); + + BUG(); } noinline __cold @@ -1450,10 +1484,11 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans) trans_for_each_update(trans, i) { struct bkey_s_c old = { &i->old_k, i->old_v }; - prt_printf(buf, "update: btree=%s cached=%u %pS\n", - bch2_btree_id_str(i->btree_id), - i->cached, - (void *) i->ip_allocated); + prt_str(buf, "update: btree="); + bch2_btree_id_to_text(buf, i->btree_id); + prt_printf(buf, " cached=%u %pS\n", + i->cached, + (void *) i->ip_allocated); prt_printf(buf, " old "); bch2_bkey_val_to_text(buf, trans->c, old); @@ -1486,13 +1521,13 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra { struct btree_path *path = trans->paths + path_idx; - prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ", + prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ", path_idx, path->ref, path->intent_ref, path->preserve ? 'P' : ' ', path->should_be_locked ? 'S' : ' ', - path->cached ? 'C' : 'B', - bch2_btree_id_str(path->btree_id), - path->level); + path->cached ? 'C' : 'B'); + bch2_btree_id_level_to_text(out, path->btree_id, path->level); + prt_str(out, " pos "); bch2_bpos_to_text(out, path->pos); if (!path->cached && btree_node_locked(path, path->level)) { @@ -1717,8 +1752,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans, struct trans_for_each_path_inorder_iter iter; btree_path_idx_t path_pos = 0, path_idx; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_trans_verify_locks(trans); btree_trans_sort_paths(trans); @@ -1833,7 +1867,7 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey * !bkey_eq(path->pos, ck->key.pos)); *u = ck->k->k; - k = bkey_i_to_s_c(ck->k); + k = (struct bkey_s_c) { u, &ck->k->v }; } return k; @@ -1843,7 +1877,6 @@ hole: return (struct bkey_s_c) { u, NULL }; } - void bch2_set_btree_iter_dontneed(struct btree_iter *iter) { struct btree_trans *trans = iter->trans; @@ -1870,7 +1903,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter) struct btree_trans *trans = iter->trans; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); iter->path = bch2_btree_path_set_pos(trans, iter->path, btree_iter_search_key(iter), @@ -1945,7 +1978,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter) int ret; EBUG_ON(trans->paths[iter->path].cached); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_btree_iter_verify(iter); ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); @@ -2101,7 +2134,7 @@ static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans, { struct btree_path *path = btree_iter_path(trans, iter); - return bch2_journal_keys_peek_upto(trans->c, iter->btree_id, + return bch2_journal_keys_peek_max(trans->c, iter->btree_id, path->level, path->pos, end_pos, @@ -2124,21 +2157,47 @@ struct bkey_s_c 
btree_trans_peek_slot_journal(struct btree_trans *trans, } static noinline -struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_s_c k) +void btree_trans_peek_journal(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c *k) { struct btree_path *path = btree_iter_path(trans, iter); struct bkey_i *next_journal = bch2_btree_journal_peek(trans, iter, - k.k ? k.k->p : path_l(path)->b->key.k.p); - + k->k ? k->k->p : path_l(path)->b->key.k.p); if (next_journal) { iter->k = next_journal->k; - k = bkey_i_to_s_c(next_journal); + *k = bkey_i_to_s_c(next_journal); } +} - return k; +static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans, + struct btree_iter *iter, + struct bpos end_pos) +{ + struct btree_path *path = btree_iter_path(trans, iter); + + return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id, + path->level, + path->pos, + end_pos, + &iter->journal_idx); +} + +static noinline +void btree_trans_peek_prev_journal(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c *k) +{ + struct btree_path *path = btree_iter_path(trans, iter); + struct bkey_i *next_journal = + bch2_btree_journal_peek_prev(trans, iter, + k->k ? k->k->p : path_l(path)->b->key.k.p); + + if (next_journal) { + iter->k = next_journal->k; + *k = bkey_i_to_s_c(next_journal); + } } /* @@ -2154,8 +2213,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos struct bkey_s_c k; int ret; - bch2_trans_verify_not_in_restart(trans); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if ((iter->flags & BTREE_ITER_key_cache_fill) && bpos_eq(iter->pos, pos)) @@ -2181,13 +2239,17 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos if (unlikely(ret)) return bkey_s_c_err(ret); - btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path); - k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u); - if (k.k && !bkey_err(k)) { - iter->k = u; - k.k = &iter->k; - } + if (!k.k) + return k; + + if ((iter->flags & BTREE_ITER_all_snapshots) && + !bpos_eq(pos, k.k->p)) + return bkey_s_c_null; + + iter->k = u; + k.k = &iter->k; + btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path); return k; } @@ -2201,8 +2263,6 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp bch2_btree_iter_verify(iter); while (1) { - struct btree_path_level *l; - iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, iter->flags & BTREE_ITER_intent, btree_iter_ip_allocated(iter)); @@ -2212,17 +2272,17 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp /* ensure that iter->k is consistent with iter->pos: */ bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); - goto out; + break; } struct btree_path *path = btree_iter_path(trans, iter); - l = path_l(path); + struct btree_path_level *l = path_l(path); if (unlikely(!l->b)) { /* No btree nodes at requested level: */ bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; - goto out; + break; } btree_path_set_should_be_locked(trans, path); @@ -2233,15 +2293,14 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp k.k && (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { k = k2; - ret = bkey_err(k); - if (ret) { + if (bkey_err(k)) { bch2_btree_iter_set_pos(iter, iter->pos); - goto out; + break; } } if (unlikely(iter->flags & 
BTREE_ITER_with_journal)) - k = btree_trans_peek_journal(trans, iter, k); + btree_trans_peek_journal(trans, iter, &k); if (unlikely((iter->flags & BTREE_ITER_with_updates) && trans->nr_updates)) @@ -2270,42 +2329,46 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp /* End of btree: */ bch2_btree_iter_set_pos(iter, SPOS_MAX); k = bkey_s_c_null; - goto out; + break; } } -out: - bch2_btree_iter_verify(iter); + bch2_btree_iter_verify(iter); return k; } /** - * bch2_btree_iter_peek_upto() - returns first key greater than or equal to + * bch2_btree_iter_peek_max() - returns first key greater than or equal to * iterator's current position * @iter: iterator to peek from * @end: search limit: returns keys less than or equal to @end * * Returns: key if found, or an error extractable with bkey_err(). */ -struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end) +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end) { struct btree_trans *trans = iter->trans; struct bpos search_key = btree_iter_search_key(iter); struct bkey_s_c k; - struct bpos iter_pos; + struct bpos iter_pos = iter->pos; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); + bch2_btree_iter_verify_entry_exit(iter); EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX)); + ret = trans_maybe_inject_restart(trans, _RET_IP_); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + if (iter->update_path) { bch2_path_put_nokeep(trans, iter->update_path, iter->flags & BTREE_ITER_intent); iter->update_path = 0; } - bch2_btree_iter_verify_entry_exit(iter); - while (1) { k = __bch2_btree_iter_peek(iter, search_key); if (unlikely(!k.k)) @@ -2313,75 +2376,75 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e if (unlikely(bkey_err(k))) goto out_no_locked; - /* - * We need to check against @end before FILTER_SNAPSHOTS because - * if we get to a different inode that requested we might be - * seeing keys for a different snapshot tree that will all be - * filtered out. - * - * But we can't do the full check here, because bkey_start_pos() - * isn't monotonically increasing before FILTER_SNAPSHOTS, and - * that's what we check against in extents mode: - */ - if (unlikely(!(iter->flags & BTREE_ITER_is_extents) - ? bkey_gt(k.k->p, end) - : k.k->p.inode > end.inode)) - goto end; + if (iter->flags & BTREE_ITER_filter_snapshots) { + /* + * We need to check against @end before FILTER_SNAPSHOTS because + * if we get to a different inode that requested we might be + * seeing keys for a different snapshot tree that will all be + * filtered out. + * + * But we can't do the full check here, because bkey_start_pos() + * isn't monotonically increasing before FILTER_SNAPSHOTS, and + * that's what we check against in extents mode: + */ + if (unlikely(!(iter->flags & BTREE_ITER_is_extents) + ? 
bkey_gt(k.k->p, end) + : k.k->p.inode > end.inode)) + goto end; + + if (iter->update_path && + !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { + bch2_path_put_nokeep(trans, iter->update_path, + iter->flags & BTREE_ITER_intent); + iter->update_path = 0; + } - if (iter->update_path && - !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) { - bch2_path_put_nokeep(trans, iter->update_path, - iter->flags & BTREE_ITER_intent); - iter->update_path = 0; - } + if ((iter->flags & BTREE_ITER_intent) && + !(iter->flags & BTREE_ITER_is_extents) && + !iter->update_path) { + struct bpos pos = k.k->p; - if ((iter->flags & BTREE_ITER_filter_snapshots) && - (iter->flags & BTREE_ITER_intent) && - !(iter->flags & BTREE_ITER_is_extents) && - !iter->update_path) { - struct bpos pos = k.k->p; + if (pos.snapshot < iter->snapshot) { + search_key = bpos_successor(k.k->p); + continue; + } - if (pos.snapshot < iter->snapshot) { - search_key = bpos_successor(k.k->p); - continue; - } + pos.snapshot = iter->snapshot; - pos.snapshot = iter->snapshot; + /* + * advance, same as on exit for iter->path, but only up + * to snapshot + */ + __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent); + iter->update_path = iter->path; + + iter->update_path = bch2_btree_path_set_pos(trans, + iter->update_path, pos, + iter->flags & BTREE_ITER_intent, + _THIS_IP_); + ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + } /* - * advance, same as on exit for iter->path, but only up - * to snapshot + * We can never have a key in a leaf node at POS_MAX, so + * we don't have to check these successor() calls: */ - __btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent); - iter->update_path = iter->path; - - iter->update_path = bch2_btree_path_set_pos(trans, - iter->update_path, pos, - iter->flags & BTREE_ITER_intent, - _THIS_IP_); - ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags); - if (unlikely(ret)) { - k = bkey_s_c_err(ret); - goto out_no_locked; + if (!bch2_snapshot_is_ancestor(trans->c, + iter->snapshot, + k.k->p.snapshot)) { + search_key = bpos_successor(k.k->p); + continue; } - } - - /* - * We can never have a key in a leaf node at POS_MAX, so - * we don't have to check these successor() calls: - */ - if ((iter->flags & BTREE_ITER_filter_snapshots) && - !bch2_snapshot_is_ancestor(trans->c, - iter->snapshot, - k.k->p.snapshot)) { - search_key = bpos_successor(k.k->p); - continue; - } - if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_all_snapshots)) { - search_key = bkey_successor(iter, k.k->p); - continue; + if (bkey_whiteout(k.k) && + !(iter->flags & BTREE_ITER_key_cache_fill)) { + search_key = bkey_successor(iter, k.k->p); + continue; + } } /* @@ -2451,127 +2514,210 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter) return bch2_btree_iter_peek(iter); } -/** - * bch2_btree_iter_peek_prev() - returns first key less than or equal to - * iterator's current position - * @iter: iterator to peek from - * - * Returns: key if found, or an error extractable with bkey_err(). 
- */ -struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) +static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key) { struct btree_trans *trans = iter->trans; - struct bpos search_key = iter->pos; - struct bkey_s_c k; - struct bkey saved_k; - const struct bch_val *saved_v; - btree_path_idx_t saved_path = 0; - int ret; - - bch2_trans_verify_not_unlocked(trans); - EBUG_ON(btree_iter_path(trans, iter)->cached || - btree_iter_path(trans, iter)->level); - - if (iter->flags & BTREE_ITER_with_journal) - return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported); + struct bkey_s_c k, k2; bch2_btree_iter_verify(iter); - bch2_btree_iter_verify_entry_exit(iter); - - if (iter->flags & BTREE_ITER_filter_snapshots) - search_key.snapshot = U32_MAX; while (1) { iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key, - iter->flags & BTREE_ITER_intent, - btree_iter_ip_allocated(iter)); + iter->flags & BTREE_ITER_intent, + btree_iter_ip_allocated(iter)); - ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); + int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags); if (unlikely(ret)) { /* ensure that iter->k is consistent with iter->pos: */ bch2_btree_iter_set_pos(iter, iter->pos); k = bkey_s_c_err(ret); - goto out_no_locked; + break; } struct btree_path *path = btree_iter_path(trans, iter); + struct btree_path_level *l = path_l(path); + + if (unlikely(!l->b)) { + /* No btree nodes at requested level: */ + bch2_btree_iter_set_pos(iter, SPOS_MAX); + k = bkey_s_c_null; + break; + } + + btree_path_set_should_be_locked(trans, path); + + k = btree_path_level_peek_all(trans->c, l, &iter->k); + if (!k.k || bpos_gt(k.k->p, search_key)) { + k = btree_path_level_prev(trans, path, l, &iter->k); - k = btree_path_level_peek(trans, path, &path->l[0], &iter->k); - if (!k.k || - ((iter->flags & BTREE_ITER_is_extents) - ? bpos_ge(bkey_start_pos(k.k), search_key) - : bpos_gt(k.k->p, search_key))) - k = btree_path_level_prev(trans, path, &path->l[0], &iter->k); + BUG_ON(k.k && bpos_gt(k.k->p, search_key)); + } + + if (unlikely(iter->flags & BTREE_ITER_with_key_cache) && + k.k && + (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) { + k = k2; + if (bkey_err(k2)) { + bch2_btree_iter_set_pos(iter, iter->pos); + break; + } + } + + if (unlikely(iter->flags & BTREE_ITER_with_journal)) + btree_trans_peek_prev_journal(trans, iter, &k); if (unlikely((iter->flags & BTREE_ITER_with_updates) && trans->nr_updates)) bch2_btree_trans_peek_prev_updates(trans, iter, &k); - if (likely(k.k)) { - if (iter->flags & BTREE_ITER_filter_snapshots) { - if (k.k->p.snapshot == iter->snapshot) - goto got_key; + if (likely(k.k && !bkey_deleted(k.k))) { + break; + } else if (k.k) { + search_key = bpos_predecessor(k.k->p); + } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { + /* Advance to previous leaf node: */ + search_key = bpos_predecessor(path->l[0].b->data->min_key); + } else { + /* Start of btree: */ + bch2_btree_iter_set_pos(iter, POS_MIN); + k = bkey_s_c_null; + break; + } + } + bch2_btree_iter_verify(iter); + return k; +} + +/** + * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to + * iterator's current position + * @iter: iterator to peek from + * @end: search limit: returns keys greater than or equal to @end + * + * Returns: key if found, or an error extractable with bkey_err(). 
+ */ +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end) +{ + if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) && + !bkey_eq(iter->pos, POS_MAX)) { + /* + * bkey_start_pos(), for extents, is not monotonically + * increasing until after filtering for snapshots: + * + * Thus, for extents we need to search forward until we find a + * real visible extents - easiest to just use peek_slot() (which + * internally uses peek() for extents) + */ + struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); + if (bkey_err(k)) + return k; + + if (!bkey_deleted(k.k) && + (!(iter->flags & BTREE_ITER_is_extents) || + bkey_lt(bkey_start_pos(k.k), iter->pos))) + return k; + } + + struct btree_trans *trans = iter->trans; + struct bpos search_key = iter->pos; + struct bkey_s_c k; + btree_path_idx_t saved_path = 0; + + bch2_trans_verify_not_unlocked_or_in_restart(trans); + bch2_btree_iter_verify_entry_exit(iter); + EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN)); + + int ret = trans_maybe_inject_restart(trans, _RET_IP_); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + + while (1) { + k = __bch2_btree_iter_peek_prev(iter, search_key); + if (unlikely(!k.k)) + goto end; + if (unlikely(bkey_err(k))) + goto out_no_locked; + + if (iter->flags & BTREE_ITER_filter_snapshots) { + struct btree_path *s = saved_path ? trans->paths + saved_path : NULL; + if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) { + /* + * If we have a saved candidate, and we're past + * the last possible snapshot overwrite, return + * it: + */ + bch2_path_put_nokeep(trans, iter->path, + iter->flags & BTREE_ITER_intent); + iter->path = saved_path; + saved_path = 0; + k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k); + break; + } + + /* + * We need to check against @end before FILTER_SNAPSHOTS because + * if we get to a different inode that requested we might be + * seeing keys for a different snapshot tree that will all be + * filtered out. + */ + if (unlikely(bkey_lt(k.k->p, end))) + goto end; + + if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) { + search_key = bpos_predecessor(k.k->p); + continue; + } + + if (k.k->p.snapshot != iter->snapshot) { /* - * If we have a saved candidate, and we're no - * longer at the same _key_ (not pos), return - * that candidate + * Have a key visible in iter->snapshot, but + * might have overwrites: - save it and keep + * searching. 
Unless it's a whiteout - then drop + * our previous saved candidate: */ - if (saved_path && !bkey_eq(k.k->p, saved_k.p)) { - bch2_path_put_nokeep(trans, iter->path, - iter->flags & BTREE_ITER_intent); - iter->path = saved_path; + if (saved_path) { + bch2_path_put_nokeep(trans, saved_path, + iter->flags & BTREE_ITER_intent); saved_path = 0; - iter->k = saved_k; - k.v = saved_v; - goto got_key; } - if (bch2_snapshot_is_ancestor(trans->c, - iter->snapshot, - k.k->p.snapshot)) { - if (saved_path) - bch2_path_put_nokeep(trans, saved_path, - iter->flags & BTREE_ITER_intent); + if (!bkey_whiteout(k.k)) { saved_path = btree_path_clone(trans, iter->path, iter->flags & BTREE_ITER_intent, _THIS_IP_); - path = btree_iter_path(trans, iter); - trace_btree_path_save_pos(trans, path, trans->paths + saved_path); - saved_k = *k.k; - saved_v = k.v; + trace_btree_path_save_pos(trans, + trans->paths + iter->path, + trans->paths + saved_path); } search_key = bpos_predecessor(k.k->p); continue; } -got_key: - if (bkey_whiteout(k.k) && - !(iter->flags & BTREE_ITER_all_snapshots)) { + + if (bkey_whiteout(k.k)) { search_key = bkey_predecessor(iter, k.k->p); - if (iter->flags & BTREE_ITER_filter_snapshots) - search_key.snapshot = U32_MAX; + search_key.snapshot = U32_MAX; continue; } - - btree_path_set_should_be_locked(trans, path); - break; - } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) { - /* Advance to previous leaf node: */ - search_key = bpos_predecessor(path->l[0].b->data->min_key); - } else { - /* Start of btree: */ - bch2_btree_iter_set_pos(iter, POS_MIN); - k = bkey_s_c_null; - goto out_no_locked; } - } - EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos)); + EBUG_ON(iter->flags & BTREE_ITER_all_snapshots ? bpos_gt(k.k->p, iter->pos) : + iter->flags & BTREE_ITER_is_extents ? bkey_ge(bkey_start_pos(k.k), iter->pos) : + bkey_gt(k.k->p, iter->pos)); + + if (unlikely(iter->flags & BTREE_ITER_all_snapshots ? bpos_lt(k.k->p, end) : + iter->flags & BTREE_ITER_is_extents ? 
bkey_le(k.k->p, end) : + bkey_lt(k.k->p, end))) + goto end; + + break; + } /* Extents can straddle iter->pos: */ - if (bkey_lt(k.k->p, iter->pos)) - iter->pos = k.k->p; + iter->pos = bpos_min(iter->pos, k.k->p);; if (iter->flags & BTREE_ITER_filter_snapshots) iter->pos.snapshot = iter->snapshot; @@ -2581,8 +2727,11 @@ out_no_locked: bch2_btree_iter_verify_entry_exit(iter); bch2_btree_iter_verify(iter); - return k; +end: + bch2_btree_iter_set_pos(iter, end); + k = bkey_s_c_null; + goto out_no_locked; } /** @@ -2607,11 +2756,17 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) struct bkey_s_c k; int ret; - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); bch2_btree_iter_verify(iter); bch2_btree_iter_verify_entry_exit(iter); EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache)); + ret = trans_maybe_inject_restart(trans, _RET_IP_); + if (unlikely(ret)) { + k = bkey_s_c_err(ret); + goto out_no_locked; + } + /* extents can't span inode numbers: */ if ((iter->flags & BTREE_ITER_is_extents) && unlikely(iter->pos.offset == KEY_OFFSET_MAX)) { @@ -2632,6 +2787,10 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) goto out_no_locked; } + struct btree_path *path = btree_iter_path(trans, iter); + if (unlikely(!btree_path_node(path, path->level))) + return bkey_s_c_null; + if ((iter->flags & BTREE_ITER_cached) || !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) { k = bkey_s_c_null; @@ -2658,6 +2817,11 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k); if (unlikely(!k.k)) goto out_no_locked; + + if (unlikely(k.k->type == KEY_TYPE_whiteout && + (iter->flags & BTREE_ITER_filter_snapshots) && + !(iter->flags & BTREE_ITER_key_cache_fill))) + iter->k.type = KEY_TYPE_deleted; } else { struct bpos next; struct bpos end = iter->pos; @@ -2671,7 +2835,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) struct btree_iter iter2; bch2_trans_copy_iter(&iter2, iter); - k = bch2_btree_iter_peek_upto(&iter2, end); + k = bch2_btree_iter_peek_max(&iter2, end); if (k.k && !bkey_err(k)) { swap(iter->key_cache_path, iter2.key_cache_path); @@ -2682,7 +2846,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter) } else { struct bpos pos = iter->pos; - k = bch2_btree_iter_peek_upto(iter, end); + k = bch2_btree_iter_peek_max(iter, end); if (unlikely(bkey_err(k))) bch2_btree_iter_set_pos(iter, pos); else @@ -2902,7 +3066,7 @@ void bch2_trans_iter_init_outlined(struct btree_trans *trans, unsigned flags) { bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, - bch2_btree_iter_flags(trans, btree_id, flags), + bch2_btree_iter_flags(trans, btree_id, 0, flags), _RET_IP_); } @@ -2918,8 +3082,11 @@ void bch2_trans_node_iter_init(struct btree_trans *trans, flags |= BTREE_ITER_snapshot_field; flags |= BTREE_ITER_all_snapshots; + if (!depth && btree_id_cached(trans->c, btree_id)) + flags |= BTREE_ITER_with_key_cache; + bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth, - __bch2_btree_iter_flags(trans, btree_id, flags), + bch2_btree_iter_flags(trans, btree_id, depth, flags), _RET_IP_); iter->min_depth = depth; @@ -2957,6 +3124,10 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size) WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX); + ret = trans_maybe_inject_restart(trans, _RET_IP_); + if (ret) + return ERR_PTR(ret); + struct 
btree_transaction_stats *s = btree_trans_stats(trans); s->max_mem = max(s->max_mem, new_bytes); @@ -3014,7 +3185,8 @@ out_new_mem: if (old_bytes) { trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes); - return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced)); + return ERR_PTR(btree_trans_restart_ip(trans, + BCH_ERR_transaction_restart_mem_realloced, _RET_IP_)); } out_change_top: p = trans->mem + trans->mem_top; @@ -3122,14 +3294,22 @@ u32 bch2_trans_begin(struct btree_trans *trans) trans->last_begin_ip = _RET_IP_; - trans_set_locked(trans); +#ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS + if (trans->restarted) { + trans->restart_count_this_trans++; + } else { + trans->restart_count_this_trans = 0; + } +#endif + + trans_set_locked(trans, false); if (trans->restarted) { bch2_btree_path_traverse_all(trans); trans->notrace_relock_fail = false; } - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); return trans->restart_count; } @@ -3228,7 +3408,7 @@ got_trans: trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier); trans->srcu_lock_time = jiffies; trans->srcu_held = true; - trans_set_locked(trans); + trans_set_locked(trans, false); closure_init_stack_release(&trans->ref); return trans; @@ -3262,6 +3442,9 @@ void bch2_trans_put(struct btree_trans *trans) { struct bch_fs *c = trans->c; + if (trans->restarted) + bch2_trans_in_restart_error(trans); + bch2_trans_unlock(trans); trans_for_each_update(trans, i) @@ -3285,6 +3468,10 @@ void bch2_trans_put(struct btree_trans *trans) closure_return_sync(&trans->ref); trans->locking_wait.task = NULL; +#ifdef CONFIG_BCACHEFS_DEBUG + darray_exit(&trans->last_restarted_trace); +#endif + unsigned long *paths_allocated = trans->paths_allocated; trans->paths_allocated = NULL; trans->paths = NULL; @@ -3338,8 +3525,9 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out, pid = owner ? owner->pid : 0; rcu_read_unlock(); - prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b', - b->level, bch2_btree_id_str(b->btree_id)); + prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b'); + bch2_btree_id_to_text(out, b->btree_id); + prt_printf(out, " l=%u:", b->level); bch2_bpos_to_text(out, btree_node_pos(b)); prt_printf(out, "\t locks %u:%u:%u held by pid %u", @@ -3378,11 +3566,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans) if (!path->nodes_locked) continue; - prt_printf(out, " path %u %c l=%u %s:", - idx, - path->cached ? 'c' : 'b', - path->level, - bch2_btree_id_str(path->btree_id)); + prt_printf(out, " path %u %c ", + idx, + path->cached ? 
'c' : 'b'); + bch2_btree_id_to_text(out, path->btree_id); + prt_printf(out, " l=%u:", path->level); bch2_bpos_to_text(out, path->pos); prt_newline(out); @@ -3488,7 +3676,7 @@ int bch2_fs_btree_iter_init(struct bch_fs *c) #ifdef CONFIG_LOCKDEP fs_reclaim_acquire(GFP_KERNEL); struct btree_trans *trans = bch2_trans_get(c); - trans_set_locked(trans); + trans_set_locked(trans, false); bch2_trans_put(trans); fs_reclaim_release(GFP_KERNEL); #endif diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h index 0bda054f80d7..b96157f3dc9c 100644 --- a/fs/bcachefs/btree_iter.h +++ b/fs/bcachefs/btree_iter.h @@ -23,6 +23,7 @@ static inline void __btree_path_get(struct btree_trans *trans, struct btree_path { unsigned idx = path - trans->paths; + EBUG_ON(idx >= trans->nr_paths); EBUG_ON(!test_bit(idx, trans->paths_allocated)); if (unlikely(path->ref == U8_MAX)) { bch2_dump_trans_paths_updates(trans); @@ -36,6 +37,7 @@ static inline void __btree_path_get(struct btree_trans *trans, struct btree_path static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent) { + EBUG_ON(path - trans->paths >= trans->nr_paths); EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated)); EBUG_ON(!path->ref); EBUG_ON(!path->intent_ref && intent); @@ -234,12 +236,12 @@ int __must_check bch2_btree_path_traverse_one(struct btree_trans *, btree_path_idx_t, unsigned, unsigned long); -static inline void bch2_trans_verify_not_unlocked(struct btree_trans *); +static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *); static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans, btree_path_idx_t path, unsigned flags) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK) return 0; @@ -324,38 +326,45 @@ static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans, bch2_trans_restart_error(trans, restart_count); } -void __noreturn bch2_trans_in_restart_error(struct btree_trans *); +void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *); -static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans) +static inline void bch2_trans_verify_not_unlocked_or_in_restart(struct btree_trans *trans) { - if (trans->restarted) - bch2_trans_in_restart_error(trans); -} - -void __noreturn bch2_trans_unlocked_error(struct btree_trans *); - -static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans) -{ - if (!trans->locked) - bch2_trans_unlocked_error(trans); + if (trans->restarted || !trans->locked) + bch2_trans_unlocked_or_in_restart_error(trans); } __always_inline -static int btree_trans_restart_nounlock(struct btree_trans *trans, int err) +static int btree_trans_restart_ip(struct btree_trans *trans, int err, unsigned long ip) { BUG_ON(err <= 0); BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart)); trans->restarted = err; - trans->last_restarted_ip = _THIS_IP_; + trans->last_restarted_ip = ip; +#ifdef CONFIG_BCACHEFS_DEBUG + darray_exit(&trans->last_restarted_trace); + bch2_save_backtrace(&trans->last_restarted_trace, current, 0, GFP_NOWAIT); +#endif return -err; } __always_inline static int btree_trans_restart(struct btree_trans *trans, int err) { - btree_trans_restart_nounlock(trans, err); - return -err; + return btree_trans_restart_ip(trans, err, _THIS_IP_); +} + +static inline int trans_maybe_inject_restart(struct btree_trans *trans, unsigned long ip) +{ +#ifdef 
CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS + if (!(ktime_get_ns() & ~(~0ULL << min(63, (10 + trans->restart_count_this_trans))))) { + trace_and_count(trans->c, trans_restart_injected, trans, ip); + return btree_trans_restart_ip(trans, + BCH_ERR_transaction_restart_fault_inject, ip); + } +#endif + return 0; } bool bch2_btree_node_upgrade(struct btree_trans *, @@ -375,6 +384,7 @@ static inline void bch2_btree_path_downgrade(struct btree_trans *trans, void bch2_trans_downgrade(struct btree_trans *); void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *); +void bch2_trans_node_drop(struct btree_trans *trans, struct btree *); void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *); int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter); @@ -384,15 +394,21 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *); struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *); struct btree *bch2_btree_iter_next_node(struct btree_iter *); -struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos); +struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos); struct bkey_s_c bch2_btree_iter_next(struct btree_iter *); static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter) { - return bch2_btree_iter_peek_upto(iter, SPOS_MAX); + return bch2_btree_iter_peek_max(iter, SPOS_MAX); +} + +struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos); + +static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter) +{ + return bch2_btree_iter_peek_prev_min(iter, POS_MIN); } -struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *); struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *); struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *); @@ -443,10 +459,17 @@ static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 sna void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *); -static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans, - unsigned btree_id, - unsigned flags) +static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans, + unsigned btree_id, + unsigned level, + unsigned flags) { + if (level || !btree_id_cached(trans->c, btree_id)) { + flags &= ~BTREE_ITER_cached; + flags &= ~BTREE_ITER_with_key_cache; + } else if (!(flags & BTREE_ITER_cached)) + flags |= BTREE_ITER_with_key_cache; + if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) && btree_id_is_extents(btree_id)) flags |= BTREE_ITER_is_extents; @@ -465,19 +488,6 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans, return flags; } -static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans, - unsigned btree_id, - unsigned flags) -{ - if (!btree_id_cached(trans->c, btree_id)) { - flags &= ~BTREE_ITER_cached; - flags &= ~BTREE_ITER_with_key_cache; - } else if (!(flags & BTREE_ITER_cached)) - flags |= BTREE_ITER_with_key_cache; - - return __bch2_btree_iter_flags(trans, btree_id, flags); -} - static inline void bch2_trans_iter_init_common(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, @@ -514,7 +524,7 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans, if (__builtin_constant_p(btree_id) && __builtin_constant_p(flags)) bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0, - bch2_btree_iter_flags(trans, btree_id, flags), + bch2_btree_iter_flags(trans, btree_id, 0, flags), _THIS_IP_); else 
bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags); @@ -593,13 +603,18 @@ static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans, bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter, \ _btree_id, _pos, _flags, KEY_TYPE_##_type)) +static inline void __bkey_val_copy(void *dst_v, unsigned dst_size, struct bkey_s_c src_k) +{ + unsigned b = min_t(unsigned, dst_size, bkey_val_bytes(src_k.k)); + memcpy(dst_v, src_k.v, b); + if (unlikely(b < dst_size)) + memset(dst_v + b, 0, dst_size - b); +} + #define bkey_val_copy(_dst_v, _src_k) \ do { \ - unsigned b = min_t(unsigned, sizeof(*_dst_v), \ - bkey_val_bytes(_src_k.k)); \ - memcpy(_dst_v, _src_k.v, b); \ - if (b < sizeof(*_dst_v)) \ - memset((void *) (_dst_v) + b, 0, sizeof(*_dst_v) - b); \ + BUILD_BUG_ON(!__typecheck(*_dst_v, *_src_k.v)); \ + __bkey_val_copy(_dst_v, sizeof(*_dst_v), _src_k.s_c); \ } while (0) static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans, @@ -608,17 +623,10 @@ static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans, unsigned val_size, void *val) { struct btree_iter iter; - struct bkey_s_c k; - int ret; - - k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type); - ret = bkey_err(k); + struct bkey_s_c k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type); + int ret = bkey_err(k); if (!ret) { - unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size); - - memcpy(val, k.v, b); - if (unlikely(b < sizeof(*val))) - memset((void *) val + b, 0, sizeof(*val) - b); + __bkey_val_copy(val, val_size, k); bch2_trans_iter_exit(trans, &iter); } @@ -677,12 +685,12 @@ static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter, bch2_btree_iter_peek(iter); } -static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter, +static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter, struct bpos end, unsigned flags) { if (!(flags & BTREE_ITER_slots)) - return bch2_btree_iter_peek_upto(iter, end); + return bch2_btree_iter_peek_max(iter, end); if (bkey_gt(iter->pos, end)) return bkey_s_c_null; @@ -743,10 +751,10 @@ transaction_restart: \ if (!_ret2) \ bch2_trans_verify_not_restarted(_trans, _restart_count);\ \ - _ret2 ?: trans_was_restarted(_trans, _restart_count); \ + _ret2 ?: trans_was_restarted(_trans, _orig_restart_count); \ }) -#define for_each_btree_key_upto_continue(_trans, _iter, \ +#define for_each_btree_key_max_continue(_trans, _iter, \ _end, _flags, _k, _do) \ ({ \ struct bkey_s_c _k; \ @@ -754,7 +762,7 @@ transaction_restart: \ \ do { \ _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), \ _end, (_flags)); \ if (!(_k).k) \ break; \ @@ -768,9 +776,9 @@ transaction_restart: \ }) #define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \ - for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) + for_each_btree_key_max_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do) -#define for_each_btree_key_upto(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max(_trans, _iter, _btree_id, \ _start, _end, _flags, _k, _do) \ ({ \ bch2_trans_begin(trans); \ @@ -779,12 +787,12 @@ transaction_restart: \ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ (_start), (_flags)); \ \ - for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\ + for_each_btree_key_max_continue(_trans, _iter, _end, _flags, _k, _do);\ }) #define for_each_btree_key(_trans, _iter, 
_btree_id, \ _start, _flags, _k, _do) \ - for_each_btree_key_upto(_trans, _iter, _btree_id, _start, \ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, \ SPOS_MAX, _flags, _k, _do) #define for_each_btree_key_reverse(_trans, _iter, _btree_id, \ @@ -828,33 +836,33 @@ transaction_restart: \ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_commit_flags))) -#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max_commit(_trans, _iter, _btree_id, \ _start, _end, _iter_flags, _k, \ _disk_res, _journal_seq, _commit_flags,\ _do) \ - for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\ + for_each_btree_key_max(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\ (_do) ?: bch2_trans_commit(_trans, (_disk_res),\ (_journal_seq), (_commit_flags))) struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); -#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \ +#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \ _start, _end, _flags, _k, _ret) \ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ (_start), (_flags)); \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\ !((_ret) = bkey_err(_k)) && (_k).k; \ bch2_btree_iter_advance(&(_iter))) -#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\ +#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\ for (; \ - (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags), \ + (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \ !((_ret) = bkey_err(_k)) && (_k).k; \ bch2_btree_iter_advance(&(_iter))) #define for_each_btree_key_norestart(_trans, _iter, _btree_id, \ _start, _flags, _k, _ret) \ - for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\ + for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\ SPOS_MAX, _flags, _k, _ret) #define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \ @@ -866,7 +874,7 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *); bch2_btree_iter_rewind(&(_iter))) #define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \ - for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) + for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret) /* * This should not be used in a fastpath, without first trying _do in diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c index c1657182c275..6d25e3f85ce8 100644 --- a/fs/bcachefs/btree_journal_iter.c +++ b/fs/bcachefs/btree_journal_iter.c @@ -16,6 +16,17 @@ * operations for the regular btree iter code to use: */ +static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos) +{ + size_t gap_size = keys->size - keys->nr; + + BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size); + + if (pos >= keys->gap) + pos -= gap_size; + return pos; +} + static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx) { size_t gap_size = keys->size - keys->nr; @@ -61,7 +72,7 @@ static size_t bch2_journal_key_search(struct journal_keys *keys, } /* Returns first non-overwritten key >= search key: */ -struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id, +struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *c, enum btree_id btree_id, unsigned level, struct bpos pos, struct bpos end_pos, size_t *idx) { 
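Aside on the gap-buffer layout used by struct journal_keys in the hunk above: pos_to_idx()/idx_to_pos() translate between a key's logical index and its slot in the backing array, where the unused slots are parked as a "gap" at one logical position rather than at the end. The following standalone sketch is illustrative only - toy types and names, not the bcachefs API - but it implements the same mapping under those assumptions.

#include <assert.h>
#include <stddef.h>

/* Toy stand-in for struct journal_keys: nr live keys in a size-slot array,
 * with the size - nr unused slots parked as a gap at logical index 'gap'. */
struct toy_keys {
	size_t nr;
	size_t size;
	size_t gap;
};

/* Logical index -> array position: entries at or past the gap sit
 * gap_size slots further right. */
static size_t toy_idx_to_pos(const struct toy_keys *k, size_t idx)
{
	size_t gap_size = k->size - k->nr;
	return idx < k->gap ? idx : idx + gap_size;
}

/* Array position -> logical index: the inverse mapping; positions inside
 * the gap never hold a live key. */
static size_t toy_pos_to_idx(const struct toy_keys *k, size_t pos)
{
	size_t gap_size = k->size - k->nr;
	assert(!(pos >= k->gap && pos < k->gap + gap_size));
	return pos < k->gap ? pos : pos - gap_size;
}

int main(void)
{
	/* 5 keys in an 8-slot array, gap moved to logical index 2:
	 * array layout: [k0][k1][gap][gap][gap][k2][k3][k4] */
	struct toy_keys k = { .nr = 5, .size = 8, .gap = 2 };

	assert(toy_idx_to_pos(&k, 1) == 1);	/* before the gap: unchanged */
	assert(toy_idx_to_pos(&k, 2) == 5);	/* after the gap: shifted by 3 */
	assert(toy_pos_to_idx(&k, 7) == 4);	/* and back again */
	return 0;
}

Keeping the gap at (or near) the current insertion point is what the in-tree comment refers to: a run of sequential insertions only shifts elements when the gap has to move, instead of shifting the whole tail on every insert.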
@@ -84,27 +95,92 @@ search: } } + struct bkey_i *ret = NULL; + rcu_read_lock(); /* for overwritten_ranges */ + while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { if (__journal_key_cmp(btree_id, level, end_pos, k) < 0) - return NULL; + break; if (k->overwritten) { - (*idx)++; + if (k->overwritten_range) + *idx = rcu_dereference(k->overwritten_range)->end; + else + *idx += 1; continue; } - if (__journal_key_cmp(btree_id, level, pos, k) <= 0) - return k->k; + if (__journal_key_cmp(btree_id, level, pos, k) <= 0) { + ret = k->k; + break; + } (*idx)++; iters++; if (iters == 10) { *idx = 0; + rcu_read_unlock(); goto search; } } - return NULL; + rcu_read_unlock(); + return ret; +} + +struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *c, enum btree_id btree_id, + unsigned level, struct bpos pos, + struct bpos end_pos, size_t *idx) +{ + struct journal_keys *keys = &c->journal_keys; + unsigned iters = 0; + struct journal_key *k; + + BUG_ON(*idx > keys->nr); +search: + if (!*idx) + *idx = __bch2_journal_key_search(keys, btree_id, level, pos); + + while (*idx && + __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) { + (*idx)++; + iters++; + if (iters == 10) { + *idx = 0; + goto search; + } + } + + struct bkey_i *ret = NULL; + rcu_read_lock(); /* for overwritten_ranges */ + + while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) { + if (__journal_key_cmp(btree_id, level, end_pos, k) > 0) + break; + + if (k->overwritten) { + if (k->overwritten_range) + *idx = rcu_dereference(k->overwritten_range)->start - 1; + else + *idx -= 1; + continue; + } + + if (__journal_key_cmp(btree_id, level, pos, k) >= 0) { + ret = k->k; + break; + } + + --(*idx); + iters++; + if (iters == 10) { + *idx = 0; + goto search; + } + } + + rcu_read_unlock(); + return ret; } struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id, @@ -112,11 +188,12 @@ struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree { size_t idx = 0; - return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx); + return bch2_journal_keys_peek_max(c, btree_id, level, pos, pos, &idx); } static void journal_iter_verify(struct journal_iter *iter) { +#ifdef CONFIG_BCACHEFS_DEBUG struct journal_keys *keys = iter->keys; size_t gap_size = keys->size - keys->nr; @@ -126,10 +203,10 @@ static void journal_iter_verify(struct journal_iter *iter) if (iter->idx < keys->size) { struct journal_key *k = keys->data + iter->idx; - int cmp = cmp_int(k->btree_id, iter->btree_id) ?: - cmp_int(k->level, iter->level); - BUG_ON(cmp < 0); + int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); + BUG_ON(cmp > 0); } +#endif } static void journal_iters_fix(struct bch_fs *c) @@ -182,7 +259,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id, * Ensure these keys are done last by journal replay, to unblock * journal reclaim: */ - .journal_seq = U32_MAX, + .journal_seq = U64_MAX, }; struct journal_keys *keys = &c->journal_keys; size_t idx = bch2_journal_key_search(keys, id, level, k->k.p); @@ -290,6 +367,68 @@ bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree, bkey_deleted(&keys->data[idx].k->k)); } +static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos) +{ + struct journal_key *k = keys->data + pos; + size_t idx = pos_to_idx(keys, pos); + + k->overwritten = true; + + struct journal_key *prev = idx > 0 ? 
keys->data + idx_to_pos(keys, idx - 1) : NULL; + struct journal_key *next = idx + 1 < keys->nr ? keys->data + idx_to_pos(keys, idx + 1) : NULL; + + bool prev_overwritten = prev && prev->overwritten; + bool next_overwritten = next && next->overwritten; + + struct journal_key_range_overwritten *prev_range = + prev_overwritten ? prev->overwritten_range : NULL; + struct journal_key_range_overwritten *next_range = + next_overwritten ? next->overwritten_range : NULL; + + BUG_ON(prev_range && prev_range->end != idx); + BUG_ON(next_range && next_range->start != idx + 1); + + if (prev_range && next_range) { + prev_range->end = next_range->end; + + keys->data[pos].overwritten_range = prev_range; + for (size_t i = next_range->start; i < next_range->end; i++) { + struct journal_key *ip = keys->data + idx_to_pos(keys, i); + BUG_ON(ip->overwritten_range != next_range); + ip->overwritten_range = prev_range; + } + + kfree_rcu_mightsleep(next_range); + } else if (prev_range) { + prev_range->end++; + k->overwritten_range = prev_range; + if (next_overwritten) { + prev_range->end++; + next->overwritten_range = prev_range; + } + } else if (next_range) { + next_range->start--; + k->overwritten_range = next_range; + if (prev_overwritten) { + next_range->start--; + prev->overwritten_range = next_range; + } + } else if (prev_overwritten || next_overwritten) { + struct journal_key_range_overwritten *r = kmalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return; + + r->start = idx - (size_t) prev_overwritten; + r->end = idx + 1 + (size_t) next_overwritten; + + rcu_assign_pointer(k->overwritten_range, r); + if (prev_overwritten) + prev->overwritten_range = r; + if (next_overwritten) + next->overwritten_range = r; + } +} + void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, unsigned level, struct bpos pos) { @@ -299,8 +438,12 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree, if (idx < keys->size && keys->data[idx].btree_id == btree && keys->data[idx].level == level && - bpos_eq(keys->data[idx].k->k.p, pos)) - keys->data[idx].overwritten = true; + bpos_eq(keys->data[idx].k->k.p, pos) && + !keys->data[idx].overwritten) { + mutex_lock(&keys->overwrite_lock); + __bch2_journal_key_overwritten(keys, idx); + mutex_unlock(&keys->overwrite_lock); + } } static void bch2_journal_iter_advance(struct journal_iter *iter) @@ -314,24 +457,32 @@ static void bch2_journal_iter_advance(struct journal_iter *iter) static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter) { + struct bkey_s_c ret = bkey_s_c_null; + journal_iter_verify(iter); + rcu_read_lock(); while (iter->idx < iter->keys->size) { struct journal_key *k = iter->keys->data + iter->idx; - int cmp = cmp_int(k->btree_id, iter->btree_id) ?: - cmp_int(k->level, iter->level); - if (cmp > 0) + int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k); + if (cmp < 0) break; BUG_ON(cmp); - if (!k->overwritten) - return bkey_i_to_s_c(k->k); + if (!k->overwritten) { + ret = bkey_i_to_s_c(k->k); + break; + } - bch2_journal_iter_advance(iter); + if (k->overwritten_range) + iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end); + else + bch2_journal_iter_advance(iter); } + rcu_read_unlock(); - return bkey_s_c_null; + return ret; } static void bch2_journal_iter_exit(struct journal_iter *iter) @@ -382,6 +533,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter : (level > 1 ? 
1 : 16); iter.prefetch = false; + iter.fail_if_too_many_whiteouts = true; bch2_bkey_buf_init(&tmp); while (nr--) { @@ -400,6 +552,7 @@ static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter) { struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret; + size_t iters = 0; if (iter->prefetch && iter->journal.level) btree_and_journal_iter_prefetch(iter); @@ -407,6 +560,11 @@ again: if (iter->at_end) return bkey_s_c_null; + iters++; + + if (iters > 20 && iter->fail_if_too_many_whiteouts) + return bkey_s_c_null; + while ((btree_k = bch2_journal_iter_peek_btree(iter)).k && bpos_lt(btree_k.k->p, iter->pos)) bch2_journal_iter_advance_btree(iter); @@ -481,16 +639,6 @@ void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans, /* sort and dedup all keys in the journal: */ -void bch2_journal_entries_free(struct bch_fs *c) -{ - struct journal_replay **i; - struct genradix_iter iter; - - genradix_for_each(&c->journal_entries, iter, i) - kvfree(*i); - genradix_free(&c->journal_entries); -} - /* * When keys compare equal, oldest compares first: */ @@ -515,15 +663,26 @@ void bch2_journal_keys_put(struct bch_fs *c) move_gap(keys, keys->nr); - darray_for_each(*keys, i) + darray_for_each(*keys, i) { + if (i->overwritten_range && + (i == &darray_last(*keys) || + i->overwritten_range != i[1].overwritten_range)) + kfree(i->overwritten_range); + if (i->allocated) kfree(i->k); + } kvfree(keys->data); keys->data = NULL; keys->nr = keys->gap = keys->size = 0; - bch2_journal_entries_free(c); + struct journal_replay **i; + struct genradix_iter iter; + + genradix_for_each(&c->journal_entries, iter, i) + kvfree(*i); + genradix_free(&c->journal_entries); } static void __journal_keys_sort(struct journal_keys *keys) @@ -628,8 +787,20 @@ void bch2_journal_keys_dump(struct bch_fs *c) darray_for_each(*keys, i) { printbuf_reset(&buf); + prt_printf(&buf, "btree="); + bch2_btree_id_to_text(&buf, i->btree_id); + prt_printf(&buf, " l=%u ", i->level); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k)); - pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf); + pr_err("%s", buf.buf); } printbuf_exit(&buf); } + +void bch2_fs_journal_keys_init(struct bch_fs *c) +{ + struct journal_keys *keys = &c->journal_keys; + + atomic_set(&keys->ref, 1); + keys->initial_ref_held = true; + mutex_init(&keys->overwrite_lock); +} diff --git a/fs/bcachefs/btree_journal_iter.h b/fs/bcachefs/btree_journal_iter.h index 1653de9d609b..2a3082919b8d 100644 --- a/fs/bcachefs/btree_journal_iter.h +++ b/fs/bcachefs/btree_journal_iter.h @@ -26,16 +26,24 @@ struct btree_and_journal_iter { struct bpos pos; bool at_end; bool prefetch; + bool fail_if_too_many_whiteouts; }; +static inline int __journal_key_btree_cmp(enum btree_id l_btree_id, + unsigned l_level, + const struct journal_key *r) +{ + return -cmp_int(l_level, r->level) ?: + cmp_int(l_btree_id, r->btree_id); +} + static inline int __journal_key_cmp(enum btree_id l_btree_id, unsigned l_level, struct bpos l_pos, const struct journal_key *r) { - return (cmp_int(l_btree_id, r->btree_id) ?: - cmp_int(l_level, r->level) ?: - bpos_cmp(l_pos, r->k->k.p)); + return __journal_key_btree_cmp(l_btree_id, l_level, r) ?: + bpos_cmp(l_pos, r->k->k.p); } static inline int journal_key_cmp(const struct journal_key *l, const struct journal_key *r) @@ -43,7 +51,9 @@ static inline int journal_key_cmp(const struct journal_key *l, const struct jour return __journal_key_cmp(l->btree_id, 
l->level, l->k->k.p, r); } -struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *, enum btree_id, +struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *, enum btree_id, + unsigned, struct bpos, struct bpos, size_t *); +struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *, enum btree_id, unsigned, struct bpos, struct bpos, size_t *); struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *, enum btree_id, unsigned, struct bpos); @@ -79,8 +89,6 @@ static inline void bch2_journal_keys_put_initial(struct bch_fs *c) c->journal_keys.initial_ref_held = false; } -void bch2_journal_entries_free(struct bch_fs *); - int bch2_journal_keys_sort(struct bch_fs *); void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id, @@ -89,4 +97,6 @@ void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id, void bch2_journal_keys_dump(struct bch_fs *); +void bch2_fs_journal_keys_init(struct bch_fs *); + #endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */ diff --git a/fs/bcachefs/btree_journal_iter_types.h b/fs/bcachefs/btree_journal_iter_types.h new file mode 100644 index 000000000000..8b773823704f --- /dev/null +++ b/fs/bcachefs/btree_journal_iter_types.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H +#define _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H + +struct journal_key_range_overwritten { + size_t start, end; +}; + +struct journal_key { + u64 journal_seq; + u32 journal_offset; + enum btree_id btree_id:8; + unsigned level:8; + bool allocated; + bool overwritten; + struct journal_key_range_overwritten __rcu * + overwritten_range; + struct bkey_i *k; +}; + +struct journal_keys { + /* must match layout in darray_types.h */ + size_t nr, size; + struct journal_key *data; + /* + * Gap buffer: instead of all the empty space in the array being at the + * end of the buffer - from @nr to @size - the empty space is at @gap. + * This means that sequential insertions are O(n) instead of O(n^2). 
+ */ + size_t gap; + atomic_t ref; + bool initial_ref_held; + struct mutex overwrite_lock; +}; + +#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_TYPES_H */ diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c index 244610b1d0b5..1821f40c161a 100644 --- a/fs/bcachefs/btree_key_cache.c +++ b/fs/bcachefs/btree_key_cache.c @@ -197,7 +197,9 @@ out: return ck; } -static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *path, +static int btree_key_cache_create(struct btree_trans *trans, + struct btree_path *path, + struct btree_path *ck_path, struct bkey_s_c k) { struct bch_fs *c = trans->c; @@ -217,7 +219,7 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path * key_u64s = min(256U, (key_u64s * 3) / 2); key_u64s = roundup_pow_of_two(key_u64s); - struct bkey_cached *ck = bkey_cached_alloc(trans, path, key_u64s); + struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s); int ret = PTR_ERR_OR_ZERO(ck); if (ret) return ret; @@ -226,19 +228,19 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path * ck = bkey_cached_reuse(bc); if (unlikely(!ck)) { bch_err(c, "error allocating memory for key cache item, btree %s", - bch2_btree_id_str(path->btree_id)); + bch2_btree_id_str(ck_path->btree_id)); return -BCH_ERR_ENOMEM_btree_key_cache_create; } } ck->c.level = 0; - ck->c.btree_id = path->btree_id; - ck->key.btree_id = path->btree_id; - ck->key.pos = path->pos; + ck->c.btree_id = ck_path->btree_id; + ck->key.btree_id = ck_path->btree_id; + ck->key.pos = ck_path->pos; ck->flags = 1U << BKEY_CACHED_ACCESSED; if (unlikely(key_u64s > ck->u64s)) { - mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED); + mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED); struct bkey_i *new_k = allocate_dropping_locks(trans, ret, kmalloc(key_u64s * sizeof(u64), _gfp)); @@ -258,22 +260,29 @@ static int btree_key_cache_create(struct btree_trans *trans, struct btree_path * bkey_reassemble(ck->k, k); + ret = bch2_btree_node_lock_write(trans, path, &path_l(path)->b->c); + if (unlikely(ret)) + goto err; + ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params); + + bch2_btree_node_unlock_write(trans, path, path_l(path)->b); + if (unlikely(ret)) /* raced with another fill? 
*/ goto err; atomic_long_inc(&bc->nr_keys); six_unlock_write(&ck->c.lock); - enum six_lock_type lock_want = __btree_lock_want(path, 0); + enum six_lock_type lock_want = __btree_lock_want(ck_path, 0); if (lock_want == SIX_LOCK_read) six_lock_downgrade(&ck->c.lock); - btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want); - path->uptodate = BTREE_ITER_UPTODATE; + btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want); + ck_path->uptodate = BTREE_ITER_UPTODATE; return 0; err: bkey_cached_free(bc, ck); - mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED); + mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED); return ret; } @@ -283,7 +292,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, unsigned flags) { if (flags & BTREE_ITER_cached_nofill) { - ck_path->uptodate = BTREE_ITER_UPTODATE; + ck_path->l[0].b = NULL; return 0; } @@ -293,6 +302,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, int ret; bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos, + BTREE_ITER_intent| BTREE_ITER_key_cache_fill| BTREE_ITER_cached_nofill); iter.flags &= ~BTREE_ITER_with_journal; @@ -306,9 +316,19 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans, if (unlikely(ret)) goto out; - ret = btree_key_cache_create(trans, ck_path, k); + ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k); if (ret) goto err; + + if (trace_key_cache_fill_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_bpos_to_text(&buf, ck_path->pos); + prt_char(&buf, ' '); + bch2_bkey_val_to_text(&buf, trans->c, k); + trace_key_cache_fill(trans, buf.buf); + printbuf_exit(&buf); + } out: /* We're not likely to need this iterator again: */ bch2_set_btree_iter_dontneed(&iter); @@ -424,8 +444,15 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, !test_bit(JOURNAL_space_low, &c->journal.flags)) commit_flags |= BCH_TRANS_COMMIT_no_journal_res; - ret = bch2_btree_iter_traverse(&b_iter) ?: - bch2_trans_update(trans, &b_iter, ck->k, + struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter); + ret = bkey_err(btree_k); + if (ret) + goto err; + + /* * Check that we're not violating cache coherency rules: */ + BUG_ON(bkey_deleted(btree_k.k)); + + ret = bch2_trans_update(trans, &b_iter, ck->k, BTREE_UPDATE_key_cache_reclaim| BTREE_UPDATE_internal_snapshot_node| BTREE_TRIGGER_norun) ?: @@ -433,7 +460,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, BCH_TRANS_COMMIT_no_check_rw| BCH_TRANS_COMMIT_no_enospc| commit_flags); - +err: bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart) && !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) && @@ -586,8 +613,18 @@ void bch2_btree_key_cache_drop(struct btree_trans *trans, bkey_cached_free(bc, ck); mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED); - btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE); - path->should_be_locked = false; + + struct btree_path *path2; + unsigned i; + trans_for_each_path(trans, path2, i) + if (path2->l[0].b == (void *) ck) { + __bch2_btree_path_unlock(trans, path2); + path2->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_drop); + path2->should_be_locked = false; + btree_path_set_dirty(path2, BTREE_ITER_NEED_TRAVERSE); + } + + bch2_trans_verify_locks(trans); } static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink, @@ -711,7 +748,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc) rcu_read_unlock(); 
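For context on the bch2_btree_key_cache_drop() change above - walking every path in the transaction and invalidating any path that still points at the freed cached key - here is a minimal, hypothetical sketch of that invalidation pattern. The struct and field names below are stand-ins chosen for illustration, not the real bcachefs structures.

#include <stdbool.h>
#include <stddef.h>

/* Stand-ins for btree_path / btree_trans; only the fields the sketch needs. */
struct toy_path {
	void *node;              /* analogous to path->l[0].b */
	bool  should_be_locked;
	bool  need_traverse;     /* analogous to BTREE_ITER_NEED_TRAVERSE */
};

struct toy_trans {
	struct toy_path paths[8];
	unsigned        nr_paths;
};

/* After freeing a shared cached key, no path may keep a dangling pointer
 * to it: drop the reference, clear should_be_locked, and force the path
 * to re-traverse before it is used again. */
static void toy_drop_cached_key(struct toy_trans *trans, void *freed_ck)
{
	for (unsigned i = 0; i < trans->nr_paths; i++) {
		struct toy_path *p = &trans->paths[i];

		if (p->node != freed_ck)
			continue;

		p->node             = NULL;   /* the real code stores an error pointer */
		p->should_be_locked = false;
		p->need_traverse    = true;
	}
}

int main(void)
{
	struct toy_trans t = { .nr_paths = 2 };
	void *ck = &t;   /* pretend this is the cached key being freed */

	t.paths[0].node = ck;
	t.paths[1].node = NULL;

	toy_drop_cached_key(&t, ck);
	return t.paths[0].need_traverse ? 0 : 1;
}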
mutex_lock(&bc->table.mutex); mutex_unlock(&bc->table.mutex); - rcu_read_lock(); continue; } for (i = 0; i < tbl->size; i++) diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c index efe2a007b482..10b805a60f52 100644 --- a/fs/bcachefs/btree_locking.c +++ b/fs/bcachefs/btree_locking.c @@ -109,6 +109,12 @@ static noinline void lock_graph_pop_all(struct lock_graph *g) lock_graph_up(g); } +static noinline void lock_graph_pop_from(struct lock_graph *g, struct trans_waiting_for_lock *i) +{ + while (g->g + g->nr > i) + lock_graph_up(g); +} + static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans) { g->g[g->nr++] = (struct trans_waiting_for_lock) { @@ -124,15 +130,20 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans) __lock_graph_down(g, trans); } -static bool lock_graph_remove_non_waiters(struct lock_graph *g) +static bool lock_graph_remove_non_waiters(struct lock_graph *g, + struct trans_waiting_for_lock *from) { struct trans_waiting_for_lock *i; - for (i = g->g + 1; i < g->g + g->nr; i++) + if (from->trans->locking != from->node_want) { + lock_graph_pop_from(g, from); + return true; + } + + for (i = from + 1; i < g->g + g->nr; i++) if (i->trans->locking != i->node_want || i->trans->locking_wait.start_time != i[-1].lock_start_time) { - while (g->g + g->nr > i) - lock_graph_up(g); + lock_graph_pop_from(g, i); return true; } @@ -179,13 +190,14 @@ static int btree_trans_abort_preference(struct btree_trans *trans) return 3; } -static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle) +static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle, + struct trans_waiting_for_lock *from) { struct trans_waiting_for_lock *i, *abort = NULL; unsigned best = 0, pref; int ret; - if (lock_graph_remove_non_waiters(g)) + if (lock_graph_remove_non_waiters(g, from)) return 0; /* Only checking, for debugfs: */ @@ -195,7 +207,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle) goto out; } - for (i = g->g; i < g->g + g->nr; i++) { + for (i = from; i < g->g + g->nr; i++) { pref = btree_trans_abort_preference(i->trans); if (pref > best) { abort = i; @@ -229,8 +241,9 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle) ret = abort_lock(g, abort); out: if (ret) - while (g->nr) - lock_graph_up(g); + lock_graph_pop_all(g); + else + lock_graph_pop_from(g, abort); return ret; } @@ -243,7 +256,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans, for (i = g->g; i < g->g + g->nr; i++) if (i->trans == trans) { closure_put(&trans->ref); - return break_cycle(g, cycle); + return break_cycle(g, cycle, i); } if (g->nr == ARRAY_SIZE(g->g)) { @@ -252,8 +265,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans, if (orig_trans->lock_may_not_fail) return 0; - while (g->nr) - lock_graph_up(g); + lock_graph_pop_all(g); if (cycle) return 0; @@ -281,7 +293,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle) g.nr = 0; - if (trans->lock_must_abort) { + if (trans->lock_must_abort && !trans->lock_may_not_fail) { if (cycle) return -1; @@ -336,7 +348,7 @@ next: * structures - which means it can't be blocked * waiting on a lock: */ - if (!lock_graph_remove_non_waiters(&g)) { + if (!lock_graph_remove_non_waiters(&g, g.g)) { /* * If lock_graph_remove_non_waiters() * didn't do anything, it must be @@ -512,7 +524,6 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans, struct btree_path *path, 
unsigned level) { struct btree *b = path->l[level].b; - struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level); if (!is_btree_node(path, level)) return false; @@ -536,24 +547,11 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans, if (race_fault()) return false; - if (btree_node_locked(path, level)) { - bool ret; - - six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]); - ret = six_lock_tryupgrade(&b->c.lock); - six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]); - - if (ret) - goto success; - } else { - if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) - goto success; - } + if (btree_node_locked(path, level) + ? six_lock_tryupgrade(&b->c.lock) + : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq)) + goto success; - /* - * Do we already have an intent lock via another path? If so, just bump - * lock count: - */ if (btree_node_lock_seq_matches(path, b, level) && btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) { btree_node_unlock(trans, path, level); @@ -782,7 +780,7 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace) return bch2_trans_relock_fail(trans, path, &f, trace); } - trans_set_locked(trans); + trans_set_locked(trans, true); out: bch2_trans_verify_locks(trans); return 0; @@ -818,6 +816,17 @@ void bch2_trans_unlock_long(struct btree_trans *trans) bch2_trans_srcu_unlock(trans); } +void bch2_trans_unlock_write(struct btree_trans *trans) +{ + struct btree_path *path; + unsigned i; + + trans_for_each_path(trans, path, i) + for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) + if (btree_node_write_locked(path, l)) + bch2_btree_node_unlock_write(trans, path, path->l[l].b); +} + int __bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock) { @@ -856,6 +865,9 @@ void bch2_btree_path_verify_locks(struct btree_path *path) (want == BTREE_NODE_UNLOCKED || have != BTREE_NODE_WRITE_LOCKED) && want != have); + + BUG_ON(btree_node_locked(path, l) && + path->l[l].lock_seq != six_lock_seq(&path->l[l].b->c.lock)); } } diff --git a/fs/bcachefs/btree_locking.h b/fs/bcachefs/btree_locking.h index 7c07f9fa9add..b54ef48eb8cc 100644 --- a/fs/bcachefs/btree_locking.h +++ b/fs/bcachefs/btree_locking.h @@ -16,6 +16,7 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags); void bch2_trans_unlock_noassert(struct btree_trans *); +void bch2_trans_unlock_write(struct btree_trans *); static inline bool is_btree_node(struct btree_path *path, unsigned l) { @@ -75,13 +76,6 @@ static inline void mark_btree_node_locked_noreset(struct btree_path *path, path->nodes_locked |= (type + 1) << (level << 1); } -static inline void mark_btree_node_unlocked(struct btree_path *path, - unsigned level) -{ - EBUG_ON(btree_node_write_locked(path, level)); - mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED); -} - static inline void mark_btree_node_locked(struct btree_trans *trans, struct btree_path *path, unsigned level, @@ -124,19 +118,25 @@ static void btree_trans_lock_hold_time_update(struct btree_trans *trans, /* unlock: */ +void bch2_btree_node_unlock_write(struct btree_trans *, + struct btree_path *, struct btree *); + static inline void btree_node_unlock(struct btree_trans *trans, struct btree_path *path, unsigned level) { int lock_type = btree_node_locked_type(path, level); EBUG_ON(level >= BTREE_MAX_DEPTH); - EBUG_ON(lock_type == BTREE_NODE_WRITE_LOCKED); if (lock_type != BTREE_NODE_UNLOCKED) { + if (unlikely(lock_type == 
BTREE_NODE_WRITE_LOCKED)) { + bch2_btree_node_unlock_write(trans, path, path->l[level].b); + lock_type = BTREE_NODE_INTENT_LOCKED; + } six_unlock_type(&path->l[level].b->c.lock, lock_type); btree_trans_lock_hold_time_update(trans, path, level); + mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED); } - mark_btree_node_unlocked(path, level); } static inline int btree_path_lowest_level_locked(struct btree_path *path) @@ -163,35 +163,39 @@ static inline void __bch2_btree_path_unlock(struct btree_trans *trans, * succeed: */ static inline void +__bch2_btree_node_unlock_write(struct btree_trans *trans, struct btree *b) +{ + if (!b->c.lock.write_lock_recurse) { + struct btree_path *linked; + unsigned i; + + trans_for_each_path_with_node(trans, b, linked, i) + linked->l[b->c.level].lock_seq++; + } + + six_unlock_write(&b->c.lock); +} + +static inline void bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path, struct btree *b) { - struct btree_path *linked; - unsigned i; - EBUG_ON(path->l[b->c.level].b != b); EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock)); EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write); mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); - - trans_for_each_path_with_node(trans, b, linked, i) - linked->l[b->c.level].lock_seq++; - - six_unlock_write(&b->c.lock); + __bch2_btree_node_unlock_write(trans, b); } -void bch2_btree_node_unlock_write(struct btree_trans *, - struct btree_path *, struct btree *); - int bch2_six_check_for_deadlock(struct six_lock *lock, void *p); /* lock: */ -static inline void trans_set_locked(struct btree_trans *trans) +static inline void trans_set_locked(struct btree_trans *trans, bool try) { if (!trans->locked) { - lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_); + lock_acquire_exclusive(&trans->dep_map, 0, try, NULL, _THIS_IP_); trans->locked = true; trans->last_unlock_ip = 0; @@ -282,7 +286,7 @@ static inline int btree_node_lock(struct btree_trans *trans, int ret = 0; EBUG_ON(level >= BTREE_MAX_DEPTH); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (likely(six_trylock_type(&b->lock, type)) || btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) || diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c index 30131c3bdd97..a7f06deee13c 100644 --- a/fs/bcachefs/btree_node_scan.c +++ b/fs/bcachefs/btree_node_scan.c @@ -12,6 +12,7 @@ #include "recovery_passes.h" #include <linux/kthread.h> +#include <linux/min_heap.h> #include <linux/sort.h> struct find_btree_nodes_worker { @@ -22,17 +23,15 @@ struct find_btree_nodes_worker { static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n) { - prt_printf(out, "%s l=%u seq=%u journal_seq=%llu cookie=%llx ", - bch2_btree_id_str(n->btree_id), n->level, n->seq, - n->journal_seq, n->cookie); + bch2_btree_id_level_to_text(out, n->btree_id, n->level); + prt_printf(out, " seq=%u journal_seq=%llu cookie=%llx ", + n->seq, n->journal_seq, n->cookie); bch2_bpos_to_text(out, n->min_key); prt_str(out, "-"); bch2_bpos_to_text(out, n->max_key); if (n->range_updated) prt_str(out, " range updated"); - if (n->overwritten) - prt_str(out, " overwritten"); for (unsigned i = 0; i < n->nr_ptrs; i++) { prt_char(out, ' '); @@ -140,6 +139,24 @@ static int found_btree_node_cmp_pos(const void *_l, const void *_r) -found_btree_node_cmp_time(l, r); } +static inline bool 
found_btree_node_cmp_pos_less(const void *l, const void *r, void *arg) +{ + return found_btree_node_cmp_pos(l, r) < 0; +} + +static inline void found_btree_node_swap(void *_l, void *_r, void *arg) +{ + struct found_btree_node *l = _l; + struct found_btree_node *r = _r; + + swap(*l, *r); +} + +static const struct min_heap_callbacks found_btree_node_heap_cbs = { + .less = found_btree_node_cmp_pos_less, + .swp = found_btree_node_swap, +}; + static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, struct bio *bio, struct btree_node *bn, u64 offset) { @@ -159,6 +176,9 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca, return; if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) { + if (!c->chacha20) + return; + struct nonce nonce = btree_nonce(&bn->keys, 0); unsigned bytes = (void *) &bn->keys - (void *) &bn->flags; @@ -292,55 +312,48 @@ err: return f->ret ?: ret; } -static void bubble_up(struct found_btree_node *n, struct found_btree_node *end) +static bool nodes_overlap(const struct found_btree_node *l, + const struct found_btree_node *r) { - while (n + 1 < end && - found_btree_node_cmp_pos(n, n + 1) > 0) { - swap(n[0], n[1]); - n++; - } + return (l->btree_id == r->btree_id && + l->level == r->level && + bpos_gt(l->max_key, r->min_key)); } static int handle_overwrites(struct bch_fs *c, - struct found_btree_node *start, - struct found_btree_node *end) + struct found_btree_node *l, + found_btree_nodes *nodes_heap) { - struct found_btree_node *n; -again: - for (n = start + 1; - n < end && - n->btree_id == start->btree_id && - n->level == start->level && - bpos_lt(n->min_key, start->max_key); - n++) { - int cmp = found_btree_node_cmp_time(start, n); + struct found_btree_node *r; + + while ((r = min_heap_peek(nodes_heap)) && + nodes_overlap(l, r)) { + int cmp = found_btree_node_cmp_time(l, r); if (cmp > 0) { - if (bpos_cmp(start->max_key, n->max_key) >= 0) - n->overwritten = true; + if (bpos_cmp(l->max_key, r->max_key) >= 0) + min_heap_pop(nodes_heap, &found_btree_node_heap_cbs, NULL); else { - n->range_updated = true; - n->min_key = bpos_successor(start->max_key); - n->range_updated = true; - bubble_up(n, end); - goto again; + r->range_updated = true; + r->min_key = bpos_successor(l->max_key); + r->range_updated = true; + min_heap_sift_down(nodes_heap, 0, &found_btree_node_heap_cbs, NULL); } } else if (cmp < 0) { - BUG_ON(bpos_cmp(n->min_key, start->min_key) <= 0); + BUG_ON(bpos_eq(l->min_key, r->min_key)); - start->max_key = bpos_predecessor(n->min_key); - start->range_updated = true; - } else if (n->level) { - n->overwritten = true; + l->max_key = bpos_predecessor(r->min_key); + l->range_updated = true; + } else if (r->level) { + min_heap_pop(nodes_heap, &found_btree_node_heap_cbs, NULL); } else { - if (bpos_cmp(start->max_key, n->max_key) >= 0) - n->overwritten = true; + if (bpos_cmp(l->max_key, r->max_key) >= 0) + min_heap_pop(nodes_heap, &found_btree_node_heap_cbs, NULL); else { - n->range_updated = true; - n->min_key = bpos_successor(start->max_key); - n->range_updated = true; - bubble_up(n, end); - goto again; + r->range_updated = true; + r->min_key = bpos_successor(l->max_key); + r->range_updated = true; + min_heap_sift_down(nodes_heap, 0, &found_btree_node_heap_cbs, NULL); } } } @@ -352,6 +365,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c) { struct find_btree_nodes *f = &c->found_btree_nodes; struct printbuf buf = PRINTBUF; + found_btree_nodes nodes_heap = {}; size_t dst; int ret = 0; @@ -406,29 +420,57 @@ int 
bch2_scan_for_btree_nodes(struct bch_fs *c) bch2_print_string_as_lines(KERN_INFO, buf.buf); } - dst = 0; - darray_for_each(f->nodes, i) { - if (i->overwritten) - continue; + swap(nodes_heap, f->nodes); + + { + /* darray must have same layout as a heap */ + min_heap_char real_heap; + BUILD_BUG_ON(sizeof(nodes_heap.nr) != sizeof(real_heap.nr)); + BUILD_BUG_ON(sizeof(nodes_heap.size) != sizeof(real_heap.size)); + BUILD_BUG_ON(offsetof(found_btree_nodes, nr) != offsetof(min_heap_char, nr)); + BUILD_BUG_ON(offsetof(found_btree_nodes, size) != offsetof(min_heap_char, size)); + } + + min_heapify_all(&nodes_heap, &found_btree_node_heap_cbs, NULL); - ret = handle_overwrites(c, i, &darray_top(f->nodes)); + if (nodes_heap.nr) { + ret = darray_push(&f->nodes, *min_heap_peek(&nodes_heap)); if (ret) goto err; - BUG_ON(i->overwritten); - f->nodes.data[dst++] = *i; + min_heap_pop(&nodes_heap, &found_btree_node_heap_cbs, NULL); } - f->nodes.nr = dst; - if (c->opts.verbose) { + while (true) { + ret = handle_overwrites(c, &darray_last(f->nodes), &nodes_heap); + if (ret) + goto err; + + if (!nodes_heap.nr) + break; + + ret = darray_push(&f->nodes, *min_heap_peek(&nodes_heap)); + if (ret) + goto err; + + min_heap_pop(&nodes_heap, &found_btree_node_heap_cbs, NULL); + } + + for (struct found_btree_node *n = f->nodes.data; n < &darray_last(f->nodes); n++) + BUG_ON(nodes_overlap(n, n + 1)); + + if (0 && c->opts.verbose) { printbuf_reset(&buf); prt_printf(&buf, "%s: nodes found after overwrites:\n", __func__); found_btree_nodes_to_text(&buf, c, f->nodes); bch2_print_string_as_lines(KERN_INFO, buf.buf); + } else { + bch_info(c, "btree node scan found %zu nodes after overwrites", f->nodes.nr); } eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL); err: + darray_exit(&nodes_heap); printbuf_exit(&buf); return ret; } @@ -499,7 +541,9 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, if (c->opts.verbose) { struct printbuf buf = PRINTBUF; - prt_printf(&buf, "recovering %s l=%u ", bch2_btree_id_str(btree), level); + prt_str(&buf, "recovery "); + bch2_btree_id_level_to_text(&buf, btree, level); + prt_str(&buf, " "); bch2_bpos_to_text(&buf, node_min); prt_str(&buf, " - "); bch2_bpos_to_text(&buf, node_max); @@ -533,7 +577,12 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree, bch_verbose(c, "%s(): recovering %s", __func__, buf.buf); printbuf_exit(&buf); - BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0)); + BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), + (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = level + 1, + .btree = btree, + })); ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k); if (ret) diff --git a/fs/bcachefs/btree_node_scan_types.h b/fs/bcachefs/btree_node_scan_types.h index b6c36c45d0be..2811b6857c97 100644 --- a/fs/bcachefs/btree_node_scan_types.h +++ b/fs/bcachefs/btree_node_scan_types.h @@ -6,7 +6,6 @@ struct found_btree_node { bool range_updated:1; - bool overwritten:1; u8 btree_id; u8 level; unsigned sectors_written; diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c index 9bf471fa4361..c4f524b2ca9a 100644 --- a/fs/bcachefs/btree_trans_commit.c +++ b/fs/bcachefs/btree_trans_commit.c @@ -133,7 +133,7 @@ static inline int bch2_trans_lock_write(struct btree_trans *trans) return 0; } -static inline void bch2_trans_unlock_write(struct btree_trans *trans) +static inline void bch2_trans_unlock_updates_write(struct btree_trans 
*trans) { if (likely(trans->write_locked)) { trans_for_each_update(trans, i) @@ -249,7 +249,7 @@ static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin, new |= 1 << BTREE_NODE_need_write; } while (!try_cmpxchg(&b->flags, &old, new)); - btree_node_write_if_need(c, b, SIX_LOCK_read); + btree_node_write_if_need(trans, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); bch2_trans_put(trans); @@ -348,7 +348,7 @@ static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans, unsigned flags) { return bch2_journal_res_get(&trans->c->journal, &trans->journal_res, - trans->journal_u64s, flags); + trans->journal_u64s, flags, trans); } #define JSET_ENTRY_LOG_U64s 4 @@ -384,7 +384,7 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags, struct bkey_i *new_k; int ret; - bch2_trans_unlock_write(trans); + bch2_trans_unlock_updates_write(trans); bch2_trans_unlock(trans); new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL); @@ -479,8 +479,7 @@ static int run_one_mem_trigger(struct btree_trans *trans, old, flags); } -static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i, - bool overwrite) +static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i) { verify_update_old_key(trans, i); @@ -507,10 +506,10 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_ return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k), BTREE_TRIGGER_insert| BTREE_TRIGGER_overwrite|flags) ?: 1; - } else if (overwrite && !i->overwrite_trigger_run) { + } else if (!i->overwrite_trigger_run) { i->overwrite_trigger_run = true; return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1; - } else if (!overwrite && !i->insert_trigger_run) { + } else if (!i->insert_trigger_run) { i->insert_trigger_run = true; return bch2_key_trigger_new(trans, i->btree_id, i->level, bkey_i_to_s(i->k), flags) ?: 1; } else { @@ -519,39 +518,45 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_ } static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id, - unsigned btree_id_start) + unsigned *btree_id_updates_start) { - for (int overwrite = 1; overwrite >= 0; --overwrite) { - bool trans_trigger_run; + bool trans_trigger_run; - /* - * Running triggers will append more updates to the list of updates as - * we're walking it: - */ - do { - trans_trigger_run = false; - - for (unsigned i = btree_id_start; - i < trans->nr_updates && trans->updates[i].btree_id <= btree_id; - i++) { - if (trans->updates[i].btree_id != btree_id) - continue; + /* + * Running triggers will append more updates to the list of updates as + * we're walking it: + */ + do { + trans_trigger_run = false; - int ret = run_one_trans_trigger(trans, trans->updates + i, overwrite); - if (ret < 0) - return ret; - if (ret) - trans_trigger_run = true; + for (unsigned i = *btree_id_updates_start; + i < trans->nr_updates && trans->updates[i].btree_id <= btree_id; + i++) { + if (trans->updates[i].btree_id < btree_id) { + *btree_id_updates_start = i; + continue; } - } while (trans_trigger_run); - } + + int ret = run_one_trans_trigger(trans, trans->updates + i); + if (ret < 0) + return ret; + if (ret) + trans_trigger_run = true; + } + } while (trans_trigger_run); + + trans_for_each_update(trans, i) + BUG_ON(!(i->flags & BTREE_TRIGGER_norun) && + i->btree_id == btree_id && + btree_node_type_has_trans_triggers(i->bkey_type) && + (!i->insert_trigger_run || 
!i->overwrite_trigger_run)); return 0; } static int bch2_trans_commit_run_triggers(struct btree_trans *trans) { - unsigned btree_id = 0, btree_id_start = 0; + unsigned btree_id = 0, btree_id_updates_start = 0; int ret = 0; /* @@ -565,27 +570,15 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans) if (btree_id == BTREE_ID_alloc) continue; - while (btree_id_start < trans->nr_updates && - trans->updates[btree_id_start].btree_id < btree_id) - btree_id_start++; - - ret = run_btree_triggers(trans, btree_id, btree_id_start); + ret = run_btree_triggers(trans, btree_id, &btree_id_updates_start); if (ret) return ret; } - for (unsigned idx = 0; idx < trans->nr_updates; idx++) { - struct btree_insert_entry *i = trans->updates + idx; - - if (i->btree_id > BTREE_ID_alloc) - break; - if (i->btree_id == BTREE_ID_alloc) { - ret = run_btree_triggers(trans, BTREE_ID_alloc, idx); - if (ret) - return ret; - break; - } - } + btree_id_updates_start = 0; + ret = run_btree_triggers(trans, BTREE_ID_alloc, &btree_id_updates_start); + if (ret) + return ret; #ifdef CONFIG_BCACHEFS_DEBUG trans_for_each_update(trans, i) @@ -609,14 +602,6 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans) return 0; } -static struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset) -{ - return (struct bversion) { - .hi = res->seq >> 32, - .lo = (res->seq << 32) | (res->offset + offset), - }; -} - static inline int bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, struct btree_insert_entry **stopped_at, @@ -627,12 +612,11 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, unsigned u64s = 0; int ret = 0; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (race_fault()) { trace_and_count(c, trans_restart_fault_inject, trans, trace_ip); - return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject); + return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject); } /* @@ -701,25 +685,14 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, struct jset_entry *entry = trans->journal_entries; percpu_down_read(&c->mark_lock); - for (entry = trans->journal_entries; entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); entry = vstruct_next(entry)) if (entry->type == BCH_JSET_ENTRY_write_buffer_keys && entry->start->k.type == KEY_TYPE_accounting) { - BUG_ON(!trans->journal_res.ref); - - struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start); - - a->k.bversion = journal_pos_to_bversion(&trans->journal_res, - (u64 *) entry - (u64 *) trans->journal_entries); - BUG_ON(bversion_zero(a->k.bversion)); - - if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) { - ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal); - if (ret) - goto revert_fs_usage; - } + ret = bch2_accounting_trans_commit_hook(trans, bkey_i_to_accounting(entry->start), flags); + if (ret) + goto revert_fs_usage; } percpu_up_read(&c->mark_lock); @@ -739,33 +712,17 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, goto fatal_err; } - trans_for_each_update(trans, i) { - enum bch_validate_flags invalid_flags = 0; + struct bkey_validate_context validate_context = { .from = BKEY_VALIDATE_commit }; - if (!(flags & BCH_TRANS_COMMIT_no_journal_res)) - invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit; - - ret 
= bch2_bkey_validate(c, bkey_i_to_s_c(i->k), - i->bkey_type, invalid_flags); - if (unlikely(ret)){ - bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n", - trans->fn, (void *) i->ip_allocated); - goto fatal_err; - } - btree_insert_entry_checks(trans, i); - } + if (!(flags & BCH_TRANS_COMMIT_no_journal_res)) + validate_context.flags = BCH_VALIDATE_write|BCH_VALIDATE_commit; for (struct jset_entry *i = trans->journal_entries; i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); i = vstruct_next(i)) { - enum bch_validate_flags invalid_flags = 0; - - if (!(flags & BCH_TRANS_COMMIT_no_journal_res)) - invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit; - ret = bch2_journal_entry_validate(c, NULL, i, bcachefs_metadata_version_current, - CPU_BIG_ENDIAN, invalid_flags); + CPU_BIG_ENDIAN, validate_context); if (unlikely(ret)) { bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n", trans->fn); @@ -773,6 +730,19 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, } } + trans_for_each_update(trans, i) { + validate_context.level = i->level; + validate_context.btree = i->btree_id; + + ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k), validate_context); + if (unlikely(ret)){ + bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n", + trans->fn, (void *) i->ip_allocated); + goto fatal_err; + } + btree_insert_entry_checks(trans, i); + } + if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) { struct journal *j = &c->journal; struct jset_entry *entry; @@ -833,13 +803,9 @@ revert_fs_usage: entry2 != entry; entry2 = vstruct_next(entry2)) if (entry2->type == BCH_JSET_ENTRY_write_buffer_keys && - entry2->start->k.type == KEY_TYPE_accounting) { - struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start); - - bch2_accounting_neg(a); - bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal); - bch2_accounting_neg(a); - } + entry2->start->k.type == KEY_TYPE_accounting) + bch2_accounting_trans_commit_revert(trans, + bkey_i_to_accounting(entry2->start), flags); percpu_up_read(&c->mark_lock); return ret; } @@ -902,7 +868,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags if (!ret && unlikely(trans->journal_replay_not_finished)) bch2_drop_overwrites_from_journal(trans); - bch2_trans_unlock_write(trans); + bch2_trans_unlock_updates_write(trans); if (!ret && trans->journal_pin) bch2_journal_pin_add(&c->journal, trans->journal_res.seq, @@ -994,24 +960,6 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags, return ret; } -static noinline int -bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags) -{ - struct bch_fs *c = trans->c; - int ret; - - if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) || - test_bit(BCH_FS_started, &c->flags)) - return -BCH_ERR_erofs_trans_commit; - - ret = drop_locks_do(trans, bch2_fs_read_write_early(c)); - if (ret) - return ret; - - bch2_write_ref_get(c, BCH_WRITE_REF_trans); - return 0; -} - /* * This is for updates done in the early part of fsck - btree_gc - before we've * gone RW. 
we only add the new key to the list of keys for journal replay to @@ -1022,6 +970,8 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans) { struct bch_fs *c = trans->c; + BUG_ON(current != c->recovery_task); + trans_for_each_update(trans, i) { int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k); if (ret) @@ -1047,8 +997,11 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) struct bch_fs *c = trans->c; int ret = 0; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); + + ret = trans_maybe_inject_restart(trans, _RET_IP_); + if (unlikely(ret)) + goto out_reset; if (!trans->nr_updates && !trans->journal_entries_u64s) @@ -1058,16 +1011,13 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) if (ret) goto out_reset; - if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { - ret = do_bch2_trans_commit_to_journal_replay(trans); - goto out_reset; - } - if (!(flags & BCH_TRANS_COMMIT_no_check_rw) && unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) { - ret = bch2_trans_commit_get_rw_cold(trans, flags); - if (ret) - goto out_reset; + if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) + ret = do_bch2_trans_commit_to_journal_replay(trans); + else + ret = -BCH_ERR_erofs_trans_commit; + goto out_reset; } EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags)); @@ -1112,8 +1062,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) } retry: errored_at = NULL; - bch2_trans_verify_not_unlocked(trans); - bch2_trans_verify_not_in_restart(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) memset(&trans->journal_res, 0, sizeof(trans->journal_res)); memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta)); diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h index 4568a41fefaf..a09cbe9cd94f 100644 --- a/fs/bcachefs/btree_types.h +++ b/fs/bcachefs/btree_types.h @@ -509,10 +509,16 @@ struct btree_trans { bool notrace_relock_fail:1; enum bch_errcode restarted:16; u32 restart_count; +#ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS + u32 restart_count_this_trans; +#endif u64 last_begin_time; unsigned long last_begin_ip; unsigned long last_restarted_ip; +#ifdef CONFIG_BCACHEFS_DEBUG + bch_stacktrace last_restarted_trace; +#endif unsigned long last_unlock_ip; unsigned long srcu_lock_time; @@ -787,53 +793,64 @@ static inline bool btree_node_type_has_triggers(enum btree_node_type type) return BIT_ULL(type) & BTREE_NODE_TYPE_HAS_TRIGGERS; } -static inline bool btree_node_type_is_extents(enum btree_node_type type) +static inline bool btree_id_is_extents(enum btree_id btree) { const u64 mask = 0 -#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1)) +#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_extents)) << nr) BCH_BTREE_IDS() #undef x ; - return BIT_ULL(type) & mask; + return BIT_ULL(btree) & mask; } -static inline bool btree_id_is_extents(enum btree_id btree) +static inline bool btree_node_type_is_extents(enum btree_node_type type) +{ + return type != BKEY_TYPE_btree && btree_id_is_extents(type - 1); +} + +static inline bool btree_type_has_snapshots(enum btree_id btree) { - return btree_node_type_is_extents(__btree_node_type(0, btree)); + const u64 mask = 0 +#define x(name, nr, flags, ...) 
|((!!((flags) & BTREE_IS_snapshots)) << nr) + BCH_BTREE_IDS() +#undef x + ; + + return BIT_ULL(btree) & mask; } -static inline bool btree_type_has_snapshots(enum btree_id id) +static inline bool btree_type_has_snapshot_field(enum btree_id btree) { const u64 mask = 0 -#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr) +#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_IS_snapshot_field|BTREE_IS_snapshots))) << nr) BCH_BTREE_IDS() #undef x ; - return BIT_ULL(id) & mask; + return BIT_ULL(btree) & mask; } -static inline bool btree_type_has_snapshot_field(enum btree_id id) +static inline bool btree_type_has_ptrs(enum btree_id btree) { const u64 mask = 0 -#define x(name, nr, flags, ...) |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr) +#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_data)) << nr) BCH_BTREE_IDS() #undef x ; - return BIT_ULL(id) & mask; + return BIT_ULL(btree) & mask; } -static inline bool btree_type_has_ptrs(enum btree_id id) +static inline bool btree_type_uses_write_buffer(enum btree_id btree) { const u64 mask = 0 -#define x(name, nr, flags, ...) |((!!((flags) & BTREE_ID_DATA)) << nr) +#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_write_buffer)) << nr) BCH_BTREE_IDS() #undef x ; - return BIT_ULL(id) & mask; + return BIT_ULL(btree) & mask; } struct btree_root { diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c index 5d809e8bd170..13d794f201a5 100644 --- a/fs/bcachefs/btree_update.c +++ b/fs/bcachefs/btree_update.c @@ -144,7 +144,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans, !(ret = bkey_err(old_k)) && bkey_eq(old_pos, old_k.k->p)) { struct bpos whiteout_pos = - SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);; + SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot); if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) || snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot)) @@ -296,7 +296,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, BTREE_ITER_intent| BTREE_ITER_with_updates| BTREE_ITER_not_extents); - k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX)); + k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) goto err; if (!k.k) @@ -323,7 +323,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans, goto out; next: bch2_btree_iter_advance(&iter); - k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX)); + k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX)); if ((ret = bkey_err(k))) goto err; if (!k.k) @@ -588,12 +588,9 @@ struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsi int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter, enum btree_id btree, struct bpos end) { - struct bkey_s_c k; - int ret = 0; - - bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent); - k = bch2_btree_iter_prev(iter); - ret = bkey_err(k); + bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent); + struct bkey_s_c k = bch2_btree_iter_peek_prev(iter); + int ret = bkey_err(k); if (ret) goto err; @@ -672,27 +669,19 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k, bch2_btree_insert_trans(trans, id, k, iter_flags)); } -int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter, - unsigned len, unsigned update_flags) +int bch2_btree_delete_at(struct btree_trans *trans, + struct btree_iter *iter, unsigned 
update_flags) { - struct bkey_i *k; - - k = bch2_trans_kmalloc(trans, sizeof(*k)); - if (IS_ERR(k)) - return PTR_ERR(k); + struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); + int ret = PTR_ERR_OR_ZERO(k); + if (ret) + return ret; bkey_init(&k->k); k->k.p = iter->pos; - bch2_key_resize(&k->k, len); return bch2_trans_update(trans, iter, k, update_flags); } -int bch2_btree_delete_at(struct btree_trans *trans, - struct btree_iter *iter, unsigned update_flags) -{ - return bch2_btree_delete_extent_at(trans, iter, 0, update_flags); -} - int bch2_btree_delete(struct btree_trans *trans, enum btree_id btree, struct bpos pos, unsigned update_flags) @@ -721,7 +710,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id, int ret = 0; bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent); - while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) { + while ((k = bch2_btree_iter_peek_max(&iter, end)).k) { struct disk_reservation disk_res = bch2_disk_reservation_init(trans->c, 0); struct bkey_i delete; @@ -794,8 +783,7 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id, return ret; } -int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, - struct bpos pos, bool set) +int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, bool set) { struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k)); int ret = PTR_ERR_OR_ZERO(k); @@ -804,13 +792,21 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, bkey_init(&k->k); k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted; - k->k.p = pos; + k->k.p = iter->pos; + if (iter->flags & BTREE_ITER_is_extents) + bch2_key_resize(&k->k, 1); + return bch2_trans_update(trans, iter, k, 0); +} + +int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree, + struct bpos pos, bool set) +{ struct btree_iter iter; bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent); - ret = bch2_btree_iter_traverse(&iter) ?: - bch2_trans_update(trans, &iter, k, 0); + int ret = bch2_btree_iter_traverse(&iter) ?: + bch2_btree_bit_mod_iter(trans, &iter, set); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -827,10 +823,17 @@ int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree, return bch2_trans_update_buffered(trans, btree, &k); } -static int __bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf, unsigned u64s) +int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf) { + unsigned u64s = DIV_ROUND_UP(buf->pos, sizeof(u64)); + prt_chars(buf, '\0', u64s * sizeof(u64) - buf->pos); + + int ret = buf->allocation_failure ? 
-BCH_ERR_ENOMEM_trans_log_msg : 0; + if (ret) + return ret; + struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s)); - int ret = PTR_ERR_OR_ZERO(e); + ret = PTR_ERR_OR_ZERO(e); if (ret) return ret; @@ -865,9 +868,8 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt, memcpy(l->d, buf.buf, buf.pos); c->journal.early_journal_entries.nr += jset_u64s(u64s); } else { - ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_TRANS_COMMIT_lazy_rw|commit_flags, - __bch2_trans_log_msg(trans, &buf, u64s)); + ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags, + bch2_trans_log_msg(trans, &buf)); } err: printbuf_exit(&buf); diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h index 70b3c989fac2..8f22ef9a7651 100644 --- a/fs/bcachefs/btree_update.h +++ b/fs/bcachefs/btree_update.h @@ -24,7 +24,6 @@ void bch2_btree_insert_key_leaf(struct btree_trans *, struct btree_path *, #define BCH_TRANS_COMMIT_FLAGS() \ x(no_enospc, "don't check for enospc") \ x(no_check_rw, "don't attempt to take a ref on c->writes") \ - x(lazy_rw, "go read-write if we haven't yet - only for use in recovery") \ x(no_journal_res, "don't take a journal reservation, instead " \ "pin journal entry referred to by trans->journal_res.seq") \ x(journal_reclaim, "operation required for journal reclaim; may return error" \ @@ -47,8 +46,6 @@ enum bch_trans_commit_flags { void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags); -int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *, - unsigned, unsigned); int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned); int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned); @@ -66,6 +63,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id, int bch2_btree_delete_range(struct bch_fs *, enum btree_id, struct bpos, struct bpos, unsigned, u64 *); +int bch2_btree_bit_mod_iter(struct btree_trans *, struct btree_iter *, bool); int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool); int bch2_btree_bit_mod_buffered(struct btree_trans *, enum btree_id, struct bpos, bool); @@ -161,6 +159,7 @@ void bch2_trans_commit_hook(struct btree_trans *, struct btree_trans_commit_hook *); int __bch2_trans_commit(struct btree_trans *, unsigned); +int bch2_trans_log_msg(struct btree_trans *, struct printbuf *); __printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...); __printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...); @@ -244,7 +243,8 @@ static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *tra KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_s_c *k, unsigned flags, + struct bkey_s_c *k, + enum btree_iter_update_trigger_flags flags, unsigned type, unsigned min_bytes) { struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes); @@ -261,8 +261,9 @@ static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, str return mut; } -static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_s_c *k, unsigned flags) +static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, + struct btree_iter *iter, struct bkey_s_c *k, + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0); } @@ -274,7 +275,8 @@ static 
inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struc static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned min_bytes) { struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags|BTREE_ITER_intent, type); @@ -289,7 +291,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags, 0, 0); } @@ -297,7 +299,8 @@ static inline struct bkey_i *bch2_bkey_get_mut_noupdate(struct btree_trans *tran static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned type, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned min_bytes) { struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter, btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes); @@ -318,7 +321,8 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans, static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags, unsigned min_bytes) + enum btree_iter_update_trigger_flags flags, + unsigned min_bytes) { return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, min_bytes); } @@ -326,7 +330,7 @@ static inline struct bkey_i *bch2_bkey_get_mut_minsize(struct btree_trans *trans static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans, struct btree_iter *iter, unsigned btree_id, struct bpos pos, - unsigned flags) + enum btree_iter_update_trigger_flags flags) { return __bch2_bkey_get_mut(trans, iter, btree_id, pos, flags, 0, 0); } @@ -337,7 +341,8 @@ static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans, KEY_TYPE_##_type, sizeof(struct bkey_i_##_type))) static inline struct bkey_i *__bch2_bkey_alloc(struct btree_trans *trans, struct btree_iter *iter, - unsigned flags, unsigned type, unsigned val_size) + enum btree_iter_update_trigger_flags flags, + unsigned type, unsigned val_size) { struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k) + val_size); int ret; diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c index d596ef93239f..f4aeadbe53c1 100644 --- a/fs/bcachefs/btree_update_interior.c +++ b/fs/bcachefs/btree_update_interior.c @@ -58,11 +58,15 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key, b->data->min_key)); + bch2_bkey_buf_init(&prev); + bkey_init(&prev.k->k); + bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); + if (b == btree_node_root(c, b)) { if (!bpos_eq(b->data->min_key, POS_MIN)) { printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->min_key); - need_fsck_err(trans, btree_root_bad_min_key, + log_fsck_err(trans, btree_root_bad_min_key, "btree root with incorrect min_key: %s", buf.buf); goto topology_repair; } @@ -70,18 +74,14 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) if (!bpos_eq(b->data->max_key, 
SPOS_MAX)) { printbuf_reset(&buf); bch2_bpos_to_text(&buf, b->data->max_key); - need_fsck_err(trans, btree_root_bad_max_key, + log_fsck_err(trans, btree_root_bad_max_key, "btree root with incorrect max_key: %s", buf.buf); goto topology_repair; } } if (!b->c.level) - return 0; - - bch2_bkey_buf_init(&prev); - bkey_init(&prev.k->k); - bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); + goto out; while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { if (k.k->type != KEY_TYPE_btree_ptr_v2) @@ -97,16 +97,16 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) bch2_topology_error(c); printbuf_reset(&buf); - prt_str(&buf, "end of prev node doesn't match start of next node\n"), - prt_printf(&buf, " in btree %s level %u node ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_str(&buf, "end of prev node doesn't match start of next node\n in "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, " node "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); prt_str(&buf, "\n prev "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k)); prt_str(&buf, "\n next "); bch2_bkey_val_to_text(&buf, c, k); - need_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf); goto topology_repair; } @@ -118,25 +118,25 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) bch2_topology_error(c); printbuf_reset(&buf); - prt_str(&buf, "empty interior node\n"); - prt_printf(&buf, " in btree %s level %u node ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_str(&buf, "empty interior node\n in "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, " node "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - need_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf); goto topology_repair; } else if (!bpos_eq(prev.k->k.p, b->key.k.p)) { bch2_topology_error(c); printbuf_reset(&buf); - prt_str(&buf, "last child node doesn't end at end of parent node\n"); - prt_printf(&buf, " in btree %s level %u node ", - bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_str(&buf, "last child node doesn't end at end of parent node\n in "); + bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); + prt_str(&buf, " node "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); prt_str(&buf, "\n last key "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k)); - need_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf); + log_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf); goto topology_repair; } out: @@ -146,13 +146,7 @@ fsck_err: printbuf_exit(&buf); return ret; topology_repair: - if ((c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) && - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) { - bch2_inconsistent_error(c); - ret = -BCH_ERR_btree_need_topology_repair; - } else { - ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); - } + ret = bch2_topology_error(c); goto out; } @@ -244,7 +238,6 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, struct btree *b) { struct bch_fs *c = trans->c; - unsigned i, level = b->c.level; bch2_btree_node_lock_write_nofail(trans, path, &b->c); @@ -255,13 +248,9 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans, mutex_unlock(&c->btree_cache.lock); 
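/*
 * The change just below folds the open-coded "drop this node from every
 * path" loops (here and in bch2_btree_node_free_never_used()) into a new
 * bch2_trans_node_drop() helper. Its definition is not part of this hunk;
 * a minimal sketch of what such a helper would have to do, inferred only
 * from the loops being removed below (the exact body is an assumption),
 * is:
 *
 *	static void bch2_trans_node_drop(struct btree_trans *trans, struct btree *b)
 *	{
 *		struct btree_path *path;
 *		unsigned i, level = b->c.level;
 *
 *		trans_for_each_path(trans, path, i)
 *			if (path->l[level].b == b) {
 *				btree_node_unlock(trans, path, level);
 *				path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
 *			}
 *	}
 */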
six_unlock_write(&b->c.lock); - mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED); + mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); - trans_for_each_path(trans, path, i) - if (path->l[level].b == b) { - btree_node_unlock(trans, path, level); - path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init); - } + bch2_trans_node_drop(trans, b); } static void bch2_btree_node_free_never_used(struct btree_update *as, @@ -270,8 +259,6 @@ static void bch2_btree_node_free_never_used(struct btree_update *as, { struct bch_fs *c = as->c; struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL]; - struct btree_path *path; - unsigned i, level = b->c.level; BUG_ON(!list_empty(&b->write_blocked)); BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as)); @@ -293,11 +280,7 @@ static void bch2_btree_node_free_never_used(struct btree_update *as, six_unlock_intent(&b->c.lock); - trans_for_each_path(trans, path, i) - if (path->l[level].b == b) { - btree_node_unlock(trans, path, level); - path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init); - } + bch2_trans_node_drop(trans, b); } static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, @@ -809,7 +792,7 @@ err: mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); - btree_node_write_if_need(c, b, SIX_LOCK_intent); + btree_node_write_if_need(trans, b, SIX_LOCK_intent); btree_node_unlock(trans, path, b->c.level); bch2_path_put(trans, path_idx, true); } @@ -830,7 +813,7 @@ err: b = as->new_nodes[i]; btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); - btree_node_write_if_need(c, b, SIX_LOCK_read); + btree_node_write_if_need(trans, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); } @@ -1366,9 +1349,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags))) bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p); - if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), - btree_node_type(b), BCH_VALIDATE_write) ?: - bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), BCH_VALIDATE_write)) { + struct bkey_validate_context from = (struct bkey_validate_context) { + .from = BKEY_VALIDATE_btree_node, + .level = b->c.level, + .btree = b->c.btree_id, + .flags = BCH_VALIDATE_commit, + }; + if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), from) ?: + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), from)) { bch2_fs_inconsistent(c, "%s: inserting invalid bkey", __func__); dump_stack(); } @@ -1418,15 +1406,26 @@ bch2_btree_insert_keys_interior(struct btree_update *as, (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0)) ; - while (!bch2_keylist_empty(keys)) { - insert = bch2_keylist_front(keys); + for (; + insert != keys->top && bpos_le(insert->k.p, b->key.k.p); + insert = bkey_next(insert)) + bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); - if (bpos_gt(insert->k.p, b->key.k.p)) - break; + if (bch2_btree_node_check_topology(trans, b)) { + struct printbuf buf = PRINTBUF; - bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); - bch2_keylist_pop_front(keys); + for (struct bkey_i *k = keys->keys; + k != insert; + k = bkey_next(k)) { + bch2_bkey_val_to_text(&buf, trans->c, bkey_i_to_s_c(k)); + prt_newline(&buf); + } + + panic("%s(): check_topology error: inserted keys\n%s", __func__, buf.buf); } + + memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data); + keys->top_p -= insert->_data - keys->keys_p; } static 
bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) @@ -1575,8 +1574,6 @@ static void btree_split_insert_keys(struct btree_update *as, bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); - - BUG_ON(bch2_btree_node_check_topology(trans, b)); } } @@ -1599,8 +1596,6 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, if (ret) return ret; - bch2_btree_interior_update_will_free_node(as, b); - if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) { struct btree *n[2]; @@ -1699,16 +1694,18 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans, if (ret) goto err; + bch2_btree_interior_update_will_free_node(as, b); + if (n3) { bch2_btree_update_get_open_buckets(as, n3); - bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n3, SIX_LOCK_intent, 0); } if (n2) { bch2_btree_update_get_open_buckets(as, n2); - bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n2, SIX_LOCK_intent, 0); } bch2_btree_update_get_open_buckets(as, n1); - bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n1, SIX_LOCK_intent, 0); /* * The old node must be freed (in memory) _before_ unlocking the new @@ -1827,8 +1824,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t btree_update_updated_node(as, b); bch2_btree_node_unlock_write(trans, path, b); - - BUG_ON(bch2_btree_node_check_topology(trans, b)); return 0; split: /* @@ -1905,7 +1900,7 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans * BUG_ON(ret); bch2_btree_update_get_open_buckets(as, n); - bch2_btree_node_write(c, n, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_trans_node_add(trans, path, n); six_unlock_intent(&n->c.lock); @@ -1953,8 +1948,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, u64 start_time = local_clock(); int ret = 0; - bch2_trans_verify_not_in_restart(trans); - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); BUG_ON(!trans->paths[path].should_be_locked); BUG_ON(!btree_node_locked(&trans->paths[path], level)); @@ -2058,9 +2052,6 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, trace_and_count(c, btree_node_merge, trans, b); - bch2_btree_interior_update_will_free_node(as, b); - bch2_btree_interior_update_will_free_node(as, m); - n = bch2_btree_node_alloc(as, trans, b->c.level); SET_BTREE_NODE_SEQ(n->data, @@ -2096,10 +2087,13 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, if (ret) goto err_free_update; + bch2_btree_interior_update_will_free_node(as, b); + bch2_btree_interior_update_will_free_node(as, m); + bch2_trans_verify_paths(trans); bch2_btree_update_get_open_buckets(as, n); - bch2_btree_node_write(c, n, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_btree_node_free_inmem(trans, trans->paths + path, b); bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m); @@ -2150,8 +2144,6 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, if (ret) goto out; - bch2_btree_interior_update_will_free_node(as, b); - n = bch2_btree_node_alloc_replacement(as, trans, b); bch2_btree_build_aux_trees(n); @@ -2175,8 +2167,10 @@ int bch2_btree_node_rewrite(struct btree_trans *trans, if (ret) goto err; + bch2_btree_interior_update_will_free_node(as, b); + 
bch2_btree_update_get_open_buckets(as, n); - bch2_btree_node_write(c, n, SIX_LOCK_intent, 0); + bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b); @@ -2201,42 +2195,50 @@ struct async_btree_rewrite { struct list_head list; enum btree_id btree_id; unsigned level; - struct bpos pos; - __le64 seq; + struct bkey_buf key; }; static int async_btree_node_rewrite_trans(struct btree_trans *trans, struct async_btree_rewrite *a) { - struct bch_fs *c = trans->c; struct btree_iter iter; - struct btree *b; - int ret; - - bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos, + bch2_trans_node_iter_init(trans, &iter, + a->btree_id, a->key.k->k.p, BTREE_MAX_DEPTH, a->level, 0); - b = bch2_btree_iter_peek_node(&iter); - ret = PTR_ERR_OR_ZERO(b); + struct btree *b = bch2_btree_iter_peek_node(&iter); + int ret = PTR_ERR_OR_ZERO(b); if (ret) goto out; - if (!b || b->data->keys.seq != a->seq) { + bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(a->key.k); + ret = found + ? bch2_btree_node_rewrite(trans, &iter, b, 0) + : -ENOENT; + +#if 0 + /* Tracepoint... */ + if (!ret || ret == -ENOENT) { + struct bch_fs *c = trans->c; struct printbuf buf = PRINTBUF; - if (b) - bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); - else - prt_str(&buf, "(null"); - bch_info(c, "%s: node to rewrite not found:, searching for seq %llu, got\n%s", - __func__, a->seq, buf.buf); + if (!ret) { + prt_printf(&buf, "rewrite node:\n "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k)); + } else { + prt_printf(&buf, "node to rewrite not found:\n want: "); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(a->key.k)); + prt_printf(&buf, "\n got: "); + if (b) + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + else + prt_str(&buf, "(null)"); + } + bch_info(c, "%s", buf.buf); printbuf_exit(&buf); - goto out; } - - ret = bch2_btree_node_rewrite(trans, &iter, b, 0); +#endif out: bch2_trans_iter_exit(trans, &iter); - return ret; } @@ -2247,81 +2249,97 @@ static void async_btree_node_rewrite_work(struct work_struct *work) struct bch_fs *c = a->c; int ret = bch2_trans_do(c, async_btree_node_rewrite_trans(trans, a)); - bch_err_fn_ratelimited(c, ret); + if (ret != -ENOENT) + bch_err_fn_ratelimited(c, ret); + + spin_lock(&c->btree_node_rewrites_lock); + list_del(&a->list); + spin_unlock(&c->btree_node_rewrites_lock); + + closure_wake_up(&c->btree_node_rewrites_wait); + + bch2_bkey_buf_exit(&a->key, c); bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); kfree(a); } void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) { - struct async_btree_rewrite *a; - int ret; - - a = kmalloc(sizeof(*a), GFP_NOFS); - if (!a) { - bch_err(c, "%s: error allocating memory", __func__); + struct async_btree_rewrite *a = kmalloc(sizeof(*a), GFP_NOFS); + if (!a) return; - } a->c = c; a->btree_id = b->c.btree_id; a->level = b->c.level; - a->pos = b->key.k.p; - a->seq = b->data->keys.seq; INIT_WORK(&a->work, async_btree_node_rewrite_work); - if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { - mutex_lock(&c->pending_node_rewrites_lock); - list_add(&a->list, &c->pending_node_rewrites); - mutex_unlock(&c->pending_node_rewrites_lock); - return; - } + bch2_bkey_buf_init(&a->key); + bch2_bkey_buf_copy(&a->key, c, &b->key); - if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { - if (test_bit(BCH_FS_started, &c->flags)) { - bch_err(c, "%s: error getting c->writes ref", __func__); - kfree(a); - return; - } + bool now = false, pending = 
false; - ret = bch2_fs_read_write_early(c); - bch_err_msg(c, ret, "going read-write"); - if (ret) { - kfree(a); - return; - } + spin_lock(&c->btree_node_rewrites_lock); + if (c->curr_recovery_pass > BCH_RECOVERY_PASS_journal_replay && + bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) { + list_add(&a->list, &c->btree_node_rewrites); + now = true; + } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { + list_add(&a->list, &c->btree_node_rewrites_pending); + pending = true; + } + spin_unlock(&c->btree_node_rewrites_lock); - bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); + if (now) { + queue_work(c->btree_node_rewrite_worker, &a->work); + } else if (pending) { + /* bch2_do_pending_node_rewrites will execute */ + } else { + bch2_bkey_buf_exit(&a->key, c); + kfree(a); } +} - queue_work(c->btree_node_rewrite_worker, &a->work); +void bch2_async_btree_node_rewrites_flush(struct bch_fs *c) +{ + closure_wait_event(&c->btree_node_rewrites_wait, + list_empty(&c->btree_node_rewrites)); } void bch2_do_pending_node_rewrites(struct bch_fs *c) { - struct async_btree_rewrite *a, *n; - - mutex_lock(&c->pending_node_rewrites_lock); - list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { - list_del(&a->list); + while (1) { + spin_lock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a = + list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); + if (a) + list_add(&a->list, &c->btree_node_rewrites); + spin_unlock(&c->btree_node_rewrites_lock); + + if (!a) + break; bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite); queue_work(c->btree_node_rewrite_worker, &a->work); } - mutex_unlock(&c->pending_node_rewrites_lock); } void bch2_free_pending_node_rewrites(struct bch_fs *c) { - struct async_btree_rewrite *a, *n; + while (1) { + spin_lock(&c->btree_node_rewrites_lock); + struct async_btree_rewrite *a = + list_pop_entry(&c->btree_node_rewrites_pending, + struct async_btree_rewrite, list); + spin_unlock(&c->btree_node_rewrites_lock); - mutex_lock(&c->pending_node_rewrites_lock); - list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) { - list_del(&a->list); + if (!a) + break; + bch2_bkey_buf_exit(&a->key, c); kfree(a); } - mutex_unlock(&c->pending_node_rewrites_lock); } static int __bch2_btree_node_update_key(struct btree_trans *trans, @@ -2575,8 +2593,9 @@ static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update prt_printf(out, "%ps: ", (void *) as->ip_started); bch2_trans_commit_flags_to_text(out, as->flags); - prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n", - bch2_btree_id_str(as->btree_id), + prt_str(out, " "); + bch2_btree_id_to_text(out, as->btree_id); + prt_printf(out, " l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n", as->update_level_start, as->update_level_end, bch2_btree_update_modes[as->mode], @@ -2677,6 +2696,9 @@ void bch2_btree_reserve_cache_to_text(struct printbuf *out, struct bch_fs *c) void bch2_fs_btree_interior_update_exit(struct bch_fs *c) { + WARN_ON(!list_empty(&c->btree_node_rewrites)); + WARN_ON(!list_empty(&c->btree_node_rewrites_pending)); + if (c->btree_node_rewrite_worker) destroy_workqueue(c->btree_node_rewrite_worker); if (c->btree_interior_update_worker) @@ -2692,8 +2714,9 @@ void bch2_fs_btree_interior_update_init_early(struct bch_fs *c) mutex_init(&c->btree_interior_update_lock); INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work); - INIT_LIST_HEAD(&c->pending_node_rewrites); - 
mutex_init(&c->pending_node_rewrites_lock); + INIT_LIST_HEAD(&c->btree_node_rewrites); + INIT_LIST_HEAD(&c->btree_node_rewrites_pending); + spin_lock_init(&c->btree_node_rewrites_lock); } int bch2_fs_btree_interior_update_init(struct bch_fs *c) diff --git a/fs/bcachefs/btree_update_interior.h b/fs/bcachefs/btree_update_interior.h index 10f400957f21..26d646e1275c 100644 --- a/fs/bcachefs/btree_update_interior.h +++ b/fs/bcachefs/btree_update_interior.h @@ -159,7 +159,7 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans, unsigned level, unsigned flags) { - bch2_trans_verify_not_unlocked(trans); + bch2_trans_verify_not_unlocked_or_in_restart(trans); return bch2_foreground_maybe_merge_sibling(trans, path, level, flags, btree_prev_sib) ?: @@ -278,12 +278,12 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c, struct bt { struct bset_tree *t = bset_tree_last(b); struct btree_node_entry *bne = max(write_block(b), - (void *) btree_bkey_last(b, bset_tree_last(b))); + (void *) btree_bkey_last(b, t)); ssize_t remaining_space = __bch2_btree_u64s_remaining(b, bne->keys.start); if (unlikely(bset_written(b, bset(b, t)))) { - if (remaining_space > (ssize_t) (block_bytes(c) >> 3)) + if (b->written + block_sectors(c) <= btree_sectors(c)) return bne; } else { if (unlikely(bset_u64s(t) * sizeof(u64) > btree_write_set_buffer(b)) && @@ -334,6 +334,7 @@ void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *); struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *, struct jset_entry *, unsigned long); +void bch2_async_btree_node_rewrites_flush(struct bch_fs *); void bch2_do_pending_node_rewrites(struct bch_fs *); void bch2_free_pending_node_rewrites(struct bch_fs *); diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c index 1639c60dffa0..b56c4987b8c9 100644 --- a/fs/bcachefs/btree_write_buffer.c +++ b/fs/bcachefs/btree_write_buffer.c @@ -19,8 +19,6 @@ static int bch2_btree_write_buffer_journal_flush(struct journal *, struct journal_entry_pin *, u64); -static int bch2_journal_keys_to_write_buffer(struct bch_fs *, struct journal_buf *); - static inline bool __wb_key_ref_cmp(const struct wb_key_ref *l, const struct wb_key_ref *r) { return (cmp_int(l->hi, r->hi) ?: @@ -314,6 +312,8 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans) darray_for_each(wb->sorted, i) { struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx]; + BUG_ON(!btree_type_uses_write_buffer(k->btree)); + for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++) prefetch(&wb->flushing.keys.data[n->idx]); @@ -481,21 +481,55 @@ err: return ret; } -static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 seq) +static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf) +{ + struct journal_keys_to_wb dst; + int ret = 0; + + bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); + + for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { + jset_entry_for_each_key(entry, k) { + ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); + if (ret) + goto out; + } + + entry->type = BCH_JSET_ENTRY_btree_keys; + } +out: + ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret; + return ret; +} + +static int fetch_wb_keys_from_journal(struct bch_fs *c, u64 max_seq) { struct journal *j = &c->journal; struct journal_buf *buf; + bool blocked; int ret = 0; - while (!ret && (buf = 
bch2_next_write_buffer_flush_journal_buf(j, seq))) { + while (!ret && (buf = bch2_next_write_buffer_flush_journal_buf(j, max_seq, &blocked))) { ret = bch2_journal_keys_to_write_buffer(c, buf); + + if (!blocked && !ret) { + spin_lock(&j->lock); + buf->need_flush_to_write_buffer = false; + spin_unlock(&j->lock); + } + mutex_unlock(&j->buf_lock); + + if (blocked) { + bch2_journal_unblock(j); + break; + } } return ret; } -static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq, +static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 max_seq, bool *did_work) { struct bch_fs *c = trans->c; @@ -505,7 +539,7 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq, do { bch2_trans_unlock(trans); - fetch_from_journal_err = fetch_wb_keys_from_journal(c, seq); + fetch_from_journal_err = fetch_wb_keys_from_journal(c, max_seq); *did_work |= wb->inc.keys.nr || wb->flushing.keys.nr; @@ -518,8 +552,8 @@ static int btree_write_buffer_flush_seq(struct btree_trans *trans, u64 seq, mutex_unlock(&wb->flushing.lock); } while (!ret && (fetch_from_journal_err || - (wb->inc.pin.seq && wb->inc.pin.seq <= seq) || - (wb->flushing.pin.seq && wb->flushing.pin.seq <= seq))); + (wb->inc.pin.seq && wb->inc.pin.seq <= max_seq) || + (wb->flushing.pin.seq && wb->flushing.pin.seq <= max_seq))); return ret; } @@ -600,6 +634,14 @@ int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans, bch2_bkey_buf_init(&tmp); if (!bkey_and_val_eq(referring_k, bkey_i_to_s_c(last_flushed->k))) { + if (trace_write_buffer_maybe_flush_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_bkey_val_to_text(&buf, c, referring_k); + trace_write_buffer_maybe_flush(trans, _RET_IP_, buf.buf); + printbuf_exit(&buf); + } + bch2_bkey_buf_reassemble(&tmp, c, referring_k); if (bkey_is_btree_ptr(referring_k.k)) { @@ -771,31 +813,6 @@ int bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys_ return ret; } -static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf) -{ - struct journal_keys_to_wb dst; - int ret = 0; - - bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq)); - - for_each_jset_entry_type(entry, buf->data, BCH_JSET_ENTRY_write_buffer_keys) { - jset_entry_for_each_key(entry, k) { - ret = bch2_journal_key_to_wb(c, &dst, entry->btree_id, k); - if (ret) - goto out; - } - - entry->type = BCH_JSET_ENTRY_btree_keys; - } - - spin_lock(&c->journal.lock); - buf->need_flush_to_write_buffer = false; - spin_unlock(&c->journal.lock); -out: - ret = bch2_journal_keys_to_write_buffer_end(c, &dst) ?: ret; - return ret; -} - static int wb_keys_resize(struct btree_write_buffer_keys *wb, size_t new_size) { if (wb->keys.size >= new_size) diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c index ec7d9a59bea9..345b117a4a4a 100644 --- a/fs/bcachefs/buckets.c +++ b/fs/bcachefs/buckets.c @@ -18,7 +18,9 @@ #include "error.h" #include "inode.h" #include "movinggc.h" +#include "rebalance.h" #include "recovery.h" +#include "recovery_passes.h" #include "reflink.h" #include "replicas.h" #include "subvolume.h" @@ -260,8 +262,6 @@ int bch2_check_fix_ptrs(struct btree_trans *trans, struct printbuf buf = PRINTBUF; int ret = 0; - percpu_down_read(&c->mark_lock); - bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) { ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update); if (ret) @@ -362,7 +362,6 @@ found: bch_info(c, "new key %s", buf.buf); } - percpu_up_read(&c->mark_lock); struct btree_iter iter; bch2_trans_node_iter_init(trans, 
&iter, btree, new->k.p, 0, level, BTREE_ITER_intent|BTREE_ITER_all_snapshots); @@ -371,8 +370,6 @@ found: BTREE_UPDATE_internal_snapshot_node| BTREE_TRIGGER_norun); bch2_trans_iter_exit(trans, &iter); - percpu_down_read(&c->mark_lock); - if (ret) goto err; @@ -380,7 +377,6 @@ found: bch2_btree_node_update_key_early(trans, btree, level - 1, k, new); } err: - percpu_up_read(&c->mark_lock); printbuf_exit(&buf); return ret; } @@ -401,8 +397,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, BUG_ON(!sectors); if (gen_after(ptr->gen, b_gen)) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_gen_newer_than_bucket_gen, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_gen_newer_than_bucket_gen, "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -415,8 +411,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_too_stale, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_too_stale, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -435,8 +431,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (b_gen != ptr->gen) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - stale_dirty_ptr, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, stale_dirty_ptr, "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -451,8 +447,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - ptr_bucket_data_type_mismatch, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, ptr_bucket_data_type_mismatch, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -466,8 +462,8 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca, } if ((u64) *bucket_sectors + sectors > U32_MAX) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - bucket_sector_count_overflow, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, bucket_sector_count_overflow, "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n" "while marking %s", ptr->dev, bucket_nr, b_gen, @@ -485,7 +481,9 @@ out: printbuf_exit(&buf); return ret; err: +fsck_err: bch2_dump_trans_updates(trans); + bch2_inconsistent_error(c); ret = -BCH_ERR_bucket_ref_update; goto out; } @@ -543,7 +541,8 @@ static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c k, const struct extent_ptr_decoded *p, s64 sectors, enum bch_data_type ptr_data_type, - struct bch_alloc_v4 *a) + struct bch_alloc_v4 *a, + bool insert) { u32 *dst_sectors = p->has_ec ? &a->stripe_sectors : !p->ptr.cached ? 
&a->dirty_sectors : @@ -553,8 +552,8 @@ static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca, if (ret) return ret; - - alloc_data_type_set(a, ptr_data_type); + if (insert) + alloc_data_type_set(a, ptr_data_type); return 0; } @@ -570,8 +569,10 @@ static int bch2_trigger_pointer(struct btree_trans *trans, struct printbuf buf = PRINTBUF; int ret = 0; - u64 abs_sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p); - *sectors = insert ? abs_sectors : -abs_sectors; + struct bkey_i_backpointer bp; + bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp); + + *sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len; struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev); if (unlikely(!ca)) { @@ -580,41 +581,36 @@ static int bch2_trigger_pointer(struct btree_trans *trans, goto err; } - struct bpos bucket; - struct bch_backpointer bp; - __bch2_extent_ptr_to_bp(trans->c, ca, btree_id, level, k, p, entry, &bucket, &bp, abs_sectors); + struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr); if (flags & BTREE_TRIGGER_transactional) { struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0); ret = PTR_ERR_OR_ZERO(a) ?: - __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &a->v); + __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert); if (ret) goto err; if (!p.ptr.cached) { - ret = bch2_bucket_backpointer_mod(trans, ca, bucket, bp, k, insert); + ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert); if (ret) goto err; } } if (flags & BTREE_TRIGGER_gc) { - percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, bucket.offset); if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", p.ptr.dev, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { ret = -BCH_ERR_trigger_pointer; - goto err_unlock; + goto err; } bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old; - ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.data_type, &new); + ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert); alloc_to_bucket(g, new); bucket_unlock(g); -err_unlock: - percpu_up_read(&c->mark_lock); if (!ret) ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); @@ -951,6 +947,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, enum bch_data_type type, unsigned sectors) { + struct bch_fs *c = trans->c; struct btree_iter iter; int ret = 0; @@ -960,8 +957,8 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, return PTR_ERR(a); if (a->v.data_type && type && a->v.data_type != type) { - bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, - bucket_metadata_type_mismatch, + bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations); + log_fsck_err(trans, bucket_metadata_type_mismatch, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" "while marking %s", iter.pos.inode, iter.pos.offset, a->v.gen, @@ -979,6 +976,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans, ret = bch2_trans_update(trans, &iter, &a->k_i, 0); } err: +fsck_err: bch2_trans_iter_exit(trans, &iter); return ret; } @@ -990,11 +988,10 @@ static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev * struct bch_fs *c = trans->c; int ret = 0; - percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, b); if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s", ca->dev_idx, bch2_data_type_str(data_type))) - goto 
err_unlock; + goto err; bucket_lock(g); struct bch_alloc_v4 old = bucket_m_to_alloc(*g); @@ -1004,26 +1001,24 @@ static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev * "different types of data in same bucket: %s, %s", bch2_data_type_str(g->data_type), bch2_data_type_str(data_type))) - goto err; + goto err_unlock; if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c, "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size", ca->dev_idx, b, g->gen, bch2_data_type_str(g->data_type ?: data_type), g->dirty_sectors, sectors)) - goto err; + goto err_unlock; g->data_type = data_type; g->dirty_sectors += sectors; struct bch_alloc_v4 new = bucket_m_to_alloc(*g); bucket_unlock(g); - percpu_up_read(&c->mark_lock); ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); return ret; -err: - bucket_unlock(g); err_unlock: - percpu_up_read(&c->mark_lock); + bucket_unlock(g); +err: return -BCH_ERR_metadata_bucket_inconsistency; } @@ -1155,6 +1150,31 @@ int bch2_trans_mark_dev_sbs(struct bch_fs *c) return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional); } +bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b) +{ + struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; + u64 b_offset = bucket_to_sector(ca, b); + u64 b_end = bucket_to_sector(ca, b + 1); + unsigned i; + + if (!b) + return true; + + for (i = 0; i < layout->nr_superblocks; i++) { + u64 offset = le64_to_cpu(layout->sb_offset[i]); + u64 end = offset + (1 << layout->sb_max_size_bits); + + if (!(offset >= b_end || end <= b_offset)) + return true; + } + + for (i = 0; i < ca->journal.nr; i++) + if (b == ca->journal.buckets[i]) + return true; + + return false; +} + /* Disk reservations: */ #define SECTORS_CACHE 1024 @@ -1238,7 +1258,7 @@ int bch2_buckets_nouse_alloc(struct bch_fs *c) for_each_member_device(c, ca) { BUG_ON(ca->buckets_nouse); - ca->buckets_nouse = kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) * + ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long), GFP_KERNEL|__GFP_ZERO); if (!ca->buckets_nouse) { @@ -1264,10 +1284,15 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) bool resize = ca->bucket_gens != NULL; int ret; - BUG_ON(resize && ca->buckets_nouse); + if (resize) + lockdep_assert_held(&c->state_lock); + + if (resize && ca->buckets_nouse) + return -BCH_ERR_no_resize_with_buckets_nouse; - if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets, - GFP_KERNEL|__GFP_ZERO))) { + bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets), + GFP_KERNEL|__GFP_ZERO); + if (!bucket_gens) { ret = -BCH_ERR_ENOMEM_bucket_gens; goto err; } @@ -1277,19 +1302,16 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) bucket_gens->nbuckets_minus_first = bucket_gens->nbuckets - bucket_gens->first_bucket; - if (resize) { - down_write(&ca->bucket_lock); - percpu_down_write(&c->mark_lock); - } - old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1); if (resize) { - size_t n = min(bucket_gens->nbuckets, old_bucket_gens->nbuckets); - + bucket_gens->nbuckets = min(bucket_gens->nbuckets, + old_bucket_gens->nbuckets); + bucket_gens->nbuckets_minus_first = + bucket_gens->nbuckets - bucket_gens->first_bucket; memcpy(bucket_gens->b, old_bucket_gens->b, - n); + bucket_gens->nbuckets); } rcu_assign_pointer(ca->bucket_gens, bucket_gens); @@ -1297,11 +1319,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets) 
nbuckets = ca->mi.nbuckets; - if (resize) { - percpu_up_write(&c->mark_lock); - up_write(&ca->bucket_lock); - } - ret = 0; err: if (bucket_gens) diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h index ccc78bfe2fd4..a9acdd6c0c86 100644 --- a/fs/bcachefs/buckets.h +++ b/fs/bcachefs/buckets.h @@ -82,16 +82,15 @@ static inline void bucket_lock(struct bucket *b) static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b) { - return genradix_ptr(&ca->buckets_gc, b); + return bucket_valid(ca, b) + ? genradix_ptr(&ca->buckets_gc, b) + : NULL; } static inline struct bucket_gens *bucket_gens(struct bch_dev *ca) { return rcu_dereference_check(ca->bucket_gens, - !ca->fs || - percpu_rwsem_is_held(&ca->fs->mark_lock) || - lockdep_is_held(&ca->fs->state_lock) || - lockdep_is_held(&ca->bucket_lock)); + lockdep_is_held(&ca->fs->state_lock)); } static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) @@ -308,26 +307,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *, enum btree_iter_update_trigger_flags); int bch2_trans_mark_dev_sbs(struct bch_fs *); -static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b) -{ - struct bch_sb_layout *layout = &ca->disk_sb.sb->layout; - u64 b_offset = bucket_to_sector(ca, b); - u64 b_end = bucket_to_sector(ca, b + 1); - unsigned i; - - if (!b) - return true; - - for (i = 0; i < layout->nr_superblocks; i++) { - u64 offset = le64_to_cpu(layout->sb_offset[i]); - u64 end = offset + (1 << layout->sb_max_size_bits); - - if (!(offset >= b_end || end <= b_offset)) - return true; - } - - return false; -} +bool bch2_is_superblock_bucket(struct bch_dev *, u64); static inline const char *bch2_data_type_str(enum bch_data_type type) { diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h index 28bd09a253c8..7174047b8e92 100644 --- a/fs/bcachefs/buckets_types.h +++ b/fs/bcachefs/buckets_types.h @@ -24,7 +24,7 @@ struct bucket_gens { u16 first_bucket; size_t nbuckets; size_t nbuckets_minus_first; - u8 b[]; + u8 b[] __counted_by(nbuckets); }; struct bch_dev_usage { diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c index f9fb150eda70..c8a488e6b7b8 100644 --- a/fs/bcachefs/buckets_waiting_for_journal.c +++ b/fs/bcachefs/buckets_waiting_for_journal.c @@ -22,23 +22,21 @@ static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_ memset(t->d, 0, sizeof(t->d[0]) << t->bits); } -bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b, - u64 flushed_seq, - unsigned dev, u64 bucket) +u64 bch2_bucket_journal_seq_ready(struct buckets_waiting_for_journal *b, + unsigned dev, u64 bucket) { struct buckets_waiting_for_journal_table *t; u64 dev_bucket = (u64) dev << 56 | bucket; - bool ret = false; - unsigned i; + u64 ret = 0; mutex_lock(&b->lock); t = b->t; - for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) { + for (unsigned i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) { struct bucket_hashed *h = bucket_hash(t, i, dev_bucket); if (h->dev_bucket == dev_bucket) { - ret = h->journal_seq > flushed_seq; + ret = h->journal_seq; break; } } diff --git a/fs/bcachefs/buckets_waiting_for_journal.h b/fs/bcachefs/buckets_waiting_for_journal.h index d2ae19cbe18c..365619ca44c8 100644 --- a/fs/bcachefs/buckets_waiting_for_journal.h +++ b/fs/bcachefs/buckets_waiting_for_journal.h @@ -4,8 +4,8 @@ #include "buckets_waiting_for_journal_types.h" -bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *, - u64, unsigned, u64); +u64 bch2_bucket_journal_seq_ready(struct 
buckets_waiting_for_journal *, + unsigned, u64); int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *, u64, unsigned, u64, u64); diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c index 2182b555c112..46e9e32105a9 100644 --- a/fs/bcachefs/chardev.c +++ b/fs/bcachefs/chardev.c @@ -6,11 +6,11 @@ #include "buckets.h" #include "chardev.h" #include "disk_accounting.h" +#include "fsck.h" #include "journal.h" #include "move.h" #include "recovery_passes.h" #include "replicas.h" -#include "super.h" #include "super-io.h" #include "thread_with_file.h" @@ -127,130 +127,6 @@ static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg } #endif -struct fsck_thread { - struct thread_with_stdio thr; - struct bch_fs *c; - struct bch_opts opts; -}; - -static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) -{ - struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); - kfree(thr); -} - -static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio) -{ - struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); - struct bch_fs *c = thr->c; - - int ret = PTR_ERR_OR_ZERO(c); - if (ret) - return ret; - - ret = bch2_fs_start(thr->c); - if (ret) - goto err; - - if (test_bit(BCH_FS_errors_fixed, &c->flags)) { - bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); - ret |= 1; - } - if (test_bit(BCH_FS_error, &c->flags)) { - bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); - ret |= 4; - } -err: - bch2_fs_stop(c); - return ret; -} - -static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { - .exit = bch2_fsck_thread_exit, - .fn = bch2_fsck_offline_thread_fn, -}; - -static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) -{ - struct bch_ioctl_fsck_offline arg; - struct fsck_thread *thr = NULL; - darray_str(devs) = {}; - long ret = 0; - - if (copy_from_user(&arg, user_arg, sizeof(arg))) - return -EFAULT; - - if (arg.flags) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - for (size_t i = 0; i < arg.nr_devs; i++) { - u64 dev_u64; - ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); - if (ret) - goto err; - - char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); - ret = PTR_ERR_OR_ZERO(dev_str); - if (ret) - goto err; - - ret = darray_push(&devs, dev_str); - if (ret) { - kfree(dev_str); - goto err; - } - } - - thr = kzalloc(sizeof(*thr), GFP_KERNEL); - if (!thr) { - ret = -ENOMEM; - goto err; - } - - thr->opts = bch2_opts_empty(); - - if (arg.opts) { - char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); - ret = PTR_ERR_OR_ZERO(optstr) ?: - bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr); - if (!IS_ERR(optstr)) - kfree(optstr); - - if (ret) - goto err; - } - - opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); - opt_set(thr->opts, read_only, 1); - opt_set(thr->opts, ratelimit_errors, 0); - - /* We need request_key() to be called before we punt to kthread: */ - opt_set(thr->opts, nostart, true); - - bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); - - thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); - - if (!IS_ERR(thr->c) && - thr->c->opts.errors == BCH_ON_ERROR_panic) - thr->c->opts.errors = BCH_ON_ERROR_ro; - - ret = __bch2_run_thread_with_stdio(&thr->thr); -out: - darray_for_each(devs, i) - kfree(*i); - darray_exit(&devs); - return ret; -err: - if (thr) - 
bch2_fsck_thread_exit(&thr->thr); - pr_err("ret %s", bch2_err_str(ret)); - goto out; -} - static long bch2_global_ioctl(unsigned cmd, void __user *arg) { long ret; @@ -775,99 +651,6 @@ static long bch2_ioctl_disk_resize_journal(struct bch_fs *c, return ret; } -static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) -{ - struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); - struct bch_fs *c = thr->c; - - c->stdio_filter = current; - c->stdio = &thr->thr.stdio; - - /* - * XXX: can we figure out a way to do this without mucking with c->opts? - */ - unsigned old_fix_errors = c->opts.fix_errors; - if (opt_defined(thr->opts, fix_errors)) - c->opts.fix_errors = thr->opts.fix_errors; - else - c->opts.fix_errors = FSCK_FIX_ask; - - c->opts.fsck = true; - set_bit(BCH_FS_fsck_running, &c->flags); - - c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; - int ret = bch2_run_online_recovery_passes(c); - - clear_bit(BCH_FS_fsck_running, &c->flags); - bch_err_fn(c, ret); - - c->stdio = NULL; - c->stdio_filter = NULL; - c->opts.fix_errors = old_fix_errors; - - up(&c->online_fsck_mutex); - bch2_ro_ref_put(c); - return ret; -} - -static const struct thread_with_stdio_ops bch2_online_fsck_ops = { - .exit = bch2_fsck_thread_exit, - .fn = bch2_fsck_online_thread_fn, -}; - -static long bch2_ioctl_fsck_online(struct bch_fs *c, - struct bch_ioctl_fsck_online arg) -{ - struct fsck_thread *thr = NULL; - long ret = 0; - - if (arg.flags) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - if (!bch2_ro_ref_tryget(c)) - return -EROFS; - - if (down_trylock(&c->online_fsck_mutex)) { - bch2_ro_ref_put(c); - return -EAGAIN; - } - - thr = kzalloc(sizeof(*thr), GFP_KERNEL); - if (!thr) { - ret = -ENOMEM; - goto err; - } - - thr->c = c; - thr->opts = bch2_opts_empty(); - - if (arg.opts) { - char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); - - ret = PTR_ERR_OR_ZERO(optstr) ?: - bch2_parse_mount_opts(c, &thr->opts, NULL, optstr); - if (!IS_ERR(optstr)) - kfree(optstr); - - if (ret) - goto err; - } - - ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); -err: - if (ret < 0) { - bch_err_fn(c, ret); - if (thr) - bch2_fsck_thread_exit(&thr->thr); - up(&c->online_fsck_mutex); - bch2_ro_ref_put(c); - } - return ret; -} - #define BCH_IOCTL(_name, _argtype) \ do { \ _argtype i; \ diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c index ce8fc677bef9..23a383577d4c 100644 --- a/fs/bcachefs/checksum.c +++ b/fs/bcachefs/checksum.c @@ -2,6 +2,7 @@ #include "bcachefs.h" #include "checksum.h" #include "errcode.h" +#include "error.h" #include "super.h" #include "super-io.h" @@ -252,6 +253,10 @@ int bch2_encrypt(struct bch_fs *c, unsigned type, if (!bch2_csum_type_is_encryption(type)) return 0; + if (bch2_fs_inconsistent_on(!c->chacha20, + c, "attempting to encrypt without encryption key")) + return -BCH_ERR_no_encryption_key; + return do_encrypt(c->chacha20, nonce, data, len); } @@ -337,8 +342,9 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type, size_t sgl_len = 0; int ret = 0; - if (!bch2_csum_type_is_encryption(type)) - return 0; + if (bch2_fs_inconsistent_on(!c->chacha20, + c, "attempting to encrypt without encryption key")) + return -BCH_ERR_no_encryption_key; darray_init(&sgl); diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h index e40499fde9a4..43b9d71f2f2b 100644 --- a/fs/bcachefs/checksum.h +++ b/fs/bcachefs/checksum.h @@ -109,7 +109,7 @@ int bch2_enable_encryption(struct bch_fs *, bool); void 
bch2_fs_encryption_exit(struct bch_fs *); int bch2_fs_encryption_init(struct bch_fs *); -static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type, +static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opt type, bool data) { switch (type) { diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c index 1410365a8891..114bf2f3879f 100644 --- a/fs/bcachefs/compress.c +++ b/fs/bcachefs/compress.c @@ -2,13 +2,34 @@ #include "bcachefs.h" #include "checksum.h" #include "compress.h" +#include "error.h" #include "extents.h" +#include "io_write.h" +#include "opts.h" #include "super-io.h" #include <linux/lz4.h> #include <linux/zlib.h> #include <linux/zstd.h> +static inline enum bch_compression_opts bch2_compression_type_to_opt(enum bch_compression_type type) +{ + switch (type) { + case BCH_COMPRESSION_TYPE_none: + case BCH_COMPRESSION_TYPE_incompressible: + return BCH_COMPRESSION_OPT_none; + case BCH_COMPRESSION_TYPE_lz4_old: + case BCH_COMPRESSION_TYPE_lz4: + return BCH_COMPRESSION_OPT_lz4; + case BCH_COMPRESSION_TYPE_gzip: + return BCH_COMPRESSION_OPT_gzip; + case BCH_COMPRESSION_TYPE_zstd: + return BCH_COMPRESSION_OPT_zstd; + default: + BUG(); + } +} + /* Bounce buffer: */ struct bbuf { void *b; @@ -158,6 +179,19 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, void *workspace; int ret; + enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type); + mempool_t *workspace_pool = &c->compress_workspace[opt]; + if (unlikely(!mempool_initialized(workspace_pool))) { + if (fsck_err(c, compression_type_not_marked_in_sb, + "compression type %s set but not marked in superblock", + __bch2_compression_types[crc.compression_type])) + ret = bch2_check_set_has_compressed_data(c, opt); + else + ret = -BCH_ERR_compression_workspace_not_initialized; + if (ret) + goto out; + } + src_data = bio_map_or_bounce(c, src, READ); switch (crc.compression_type) { @@ -176,13 +210,13 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, .avail_out = dst_len, }; - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); zlib_set_workspace(&strm, workspace); zlib_inflateInit2(&strm, -MAX_WBITS); ret = zlib_inflate(&strm, Z_FINISH); - mempool_free(workspace, &c->decompress_workspace); + mempool_free(workspace, workspace_pool); if (ret != Z_STREAM_END) goto err; @@ -195,14 +229,14 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, if (real_src_len > src_len - 4) goto err; - workspace = mempool_alloc(&c->decompress_workspace, GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); ctx = zstd_init_dctx(workspace, zstd_dctx_workspace_bound()); ret = zstd_decompress_dctx(ctx, dst_data, dst_len, src_data.b + 4, real_src_len); - mempool_free(workspace, &c->decompress_workspace); + mempool_free(workspace, workspace_pool); if (ret != dst_len) goto err; @@ -212,6 +246,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src, BUG(); } ret = 0; +fsck_err: out: bio_unmap_or_unbounce(c, src_data); return ret; @@ -220,11 +255,14 @@ err: goto out; } -int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio, - struct bch_extent_crc_unpacked *crc) +int bch2_bio_uncompress_inplace(struct bch_write_op *op, + struct bio *bio) { + struct bch_fs *c = op->c; + struct bch_extent_crc_unpacked *crc = &op->crc; struct bbuf data = { NULL }; size_t dst_len = crc->uncompressed_size << 9; + int ret = 0; /* bio must own its pages: */ BUG_ON(!bio->bi_vcnt); @@ -232,17 
+270,26 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio, if (crc->uncompressed_size << 9 > c->opts.encoded_extent_max || crc->compressed_size << 9 > c->opts.encoded_extent_max) { - bch_err(c, "error rewriting existing data: extent too big"); + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "error rewriting existing data: extent too big"); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); return -EIO; } data = __bounce_alloc(c, dst_len, WRITE); if (__bio_uncompress(c, bio, data.b, *crc)) { - if (!c->opts.no_data_io) - bch_err(c, "error rewriting existing data: decompression error"); - bio_unmap_or_unbounce(c, data); - return -EIO; + if (!c->opts.no_data_io) { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "error rewriting existing data: decompression error"); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } + ret = -EIO; + goto err; } /* @@ -259,9 +306,9 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio, crc->uncompressed_size = crc->live_size; crc->offset = 0; crc->csum = (struct bch_csum) { 0, 0 }; - +err: bio_unmap_or_unbounce(c, data); - return 0; + return ret; } int bch2_bio_uncompress(struct bch_fs *c, struct bio *src, @@ -394,8 +441,21 @@ static unsigned __bio_compress(struct bch_fs *c, unsigned pad; int ret = 0; - BUG_ON(compression_type >= BCH_COMPRESSION_TYPE_NR); - BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type])); + /* bch2_compression_decode catches unknown compression types: */ + BUG_ON(compression.type >= BCH_COMPRESSION_OPT_NR); + + mempool_t *workspace_pool = &c->compress_workspace[compression.type]; + if (unlikely(!mempool_initialized(workspace_pool))) { + if (fsck_err(c, compression_opt_not_marked_in_sb, + "compression opt %s set but not marked in superblock", + bch2_compression_opts[compression.type])) { + ret = bch2_check_set_has_compressed_data(c, compression.type); + if (ret) /* memory allocation failure, don't compress */ + return 0; + } else { + return 0; + } + } /* If it's only one block, don't bother trying to compress: */ if (src->bi_iter.bi_size <= c->opts.block_size) @@ -404,7 +464,7 @@ static unsigned __bio_compress(struct bch_fs *c, dst_data = bio_map_or_bounce(c, dst, WRITE); src_data = bio_map_or_bounce(c, src, READ); - workspace = mempool_alloc(&c->compress_workspace[compression_type], GFP_NOFS); + workspace = mempool_alloc(workspace_pool, GFP_NOFS); *src_len = src->bi_iter.bi_size; *dst_len = dst->bi_iter.bi_size; @@ -447,7 +507,7 @@ static unsigned __bio_compress(struct bch_fs *c, *src_len = round_down(*src_len, block_bytes(c)); } - mempool_free(workspace, &c->compress_workspace[compression_type]); + mempool_free(workspace, workspace_pool); if (ret) goto err; @@ -477,6 +537,9 @@ out: err: ret = BCH_COMPRESSION_TYPE_incompressible; goto out; +fsck_err: + ret = 0; + goto out; } unsigned bch2_bio_compress(struct bch_fs *c, @@ -559,7 +622,6 @@ void bch2_fs_compress_exit(struct bch_fs *c) { unsigned i; - mempool_exit(&c->decompress_workspace); for (i = 0; i < ARRAY_SIZE(c->compress_workspace); i++) mempool_exit(&c->compress_workspace[i]); mempool_exit(&c->compression_bounce[WRITE]); @@ -568,7 +630,6 @@ void bch2_fs_compress_exit(struct bch_fs *c) static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) { - size_t decompress_workspace_size = 0; ZSTD_parameters params = zstd_get_params(zstd_max_clevel(), c->opts.encoded_extent_max); @@ -576,19 +637,17 @@ static int 
__bch2_fs_compress_init(struct bch_fs *c, u64 features) struct { unsigned feature; - enum bch_compression_type type; + enum bch_compression_opts type; size_t compress_workspace; - size_t decompress_workspace; } compression_types[] = { - { BCH_FEATURE_lz4, BCH_COMPRESSION_TYPE_lz4, - max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS), - 0 }, - { BCH_FEATURE_gzip, BCH_COMPRESSION_TYPE_gzip, - zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL), - zlib_inflate_workspacesize(), }, - { BCH_FEATURE_zstd, BCH_COMPRESSION_TYPE_zstd, - c->zstd_workspace_size, - zstd_dctx_workspace_bound() }, + { BCH_FEATURE_lz4, BCH_COMPRESSION_OPT_lz4, + max_t(size_t, LZ4_MEM_COMPRESS, LZ4HC_MEM_COMPRESS) }, + { BCH_FEATURE_gzip, BCH_COMPRESSION_OPT_gzip, + max(zlib_deflate_workspacesize(MAX_WBITS, DEF_MEM_LEVEL), + zlib_inflate_workspacesize()) }, + { BCH_FEATURE_zstd, BCH_COMPRESSION_OPT_zstd, + max(c->zstd_workspace_size, + zstd_dctx_workspace_bound()) }, }, *i; bool have_compressed = false; @@ -613,9 +672,6 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) for (i = compression_types; i < compression_types + ARRAY_SIZE(compression_types); i++) { - decompress_workspace_size = - max(decompress_workspace_size, i->decompress_workspace); - if (!(features & (1 << i->feature))) continue; @@ -628,11 +684,6 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features) return -BCH_ERR_ENOMEM_compression_workspace_init; } - if (!mempool_initialized(&c->decompress_workspace) && - mempool_init_kvmalloc_pool(&c->decompress_workspace, - 1, decompress_workspace_size)) - return -BCH_ERR_ENOMEM_decompression_workspace_init; - return 0; } diff --git a/fs/bcachefs/compress.h b/fs/bcachefs/compress.h index 607fd5e232c9..bec2f05bfd52 100644 --- a/fs/bcachefs/compress.h +++ b/fs/bcachefs/compress.h @@ -47,8 +47,8 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v) return __bch2_compression_opt_to_type[bch2_compression_decode(v).type]; } -int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *, - struct bch_extent_crc_unpacked *); +struct bch_write_op; +int bch2_bio_uncompress_inplace(struct bch_write_op *, struct bio *); int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *, struct bvec_iter, struct bch_extent_crc_unpacked); unsigned bch2_bio_compress(struct bch_fs *, struct bio *, size_t *, diff --git a/fs/bcachefs/darray.h b/fs/bcachefs/darray.h index 8f4c3f0665c4..c6151495985f 100644 --- a/fs/bcachefs/darray.h +++ b/fs/bcachefs/darray.h @@ -83,7 +83,7 @@ int __bch2_darray_resize_noprof(darray_char *, size_t, size_t, gfp_t); for (typeof(&(_d).data[0]) _i = (_d).data; _i < (_d).data + (_d).nr; _i++) #define darray_for_each_reverse(_d, _i) \ - for (typeof(&(_d).data[0]) _i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i) + for (typeof(&(_d).data[0]) _i = (_d).data + (_d).nr - 1; _i >= (_d).data && (_d).nr; --_i) #define darray_init(_d) \ do { \ diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 8e75a852b358..337494facac6 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -91,15 +91,28 @@ static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struc return true; } -static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k) +static noinline void trace_move_extent_finish2(struct data_update *u, + struct bkey_i *new, + struct bkey_i *insert) { - if (trace_move_extent_finish_enabled()) { - struct printbuf buf = PRINTBUF; + struct bch_fs *c = u->op.c; + struct printbuf buf = 
PRINTBUF; - bch2_bkey_val_to_text(&buf, c, k); - trace_move_extent_finish(c, buf.buf); - printbuf_exit(&buf); - } + prt_newline(&buf); + + bch2_data_update_to_text(&buf, u); + prt_newline(&buf); + + prt_str_indented(&buf, "new replicas:\t"); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new)); + prt_newline(&buf); + + prt_str_indented(&buf, "insert:\t"); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); + prt_newline(&buf); + + trace_move_extent_finish(c, buf.buf); + printbuf_exit(&buf); } static void trace_move_extent_fail2(struct data_update *m, @@ -110,11 +123,8 @@ static void trace_move_extent_fail2(struct data_update *m, { struct bch_fs *c = m->op.c; struct bkey_s_c old = bkey_i_to_s_c(m->k.k); - const union bch_extent_entry *entry; - struct bch_extent_ptr *ptr; - struct extent_ptr_decoded p; struct printbuf buf = PRINTBUF; - unsigned i, rewrites_found = 0; + unsigned rewrites_found = 0; if (!trace_move_extent_fail_enabled()) return; @@ -122,27 +132,25 @@ static void trace_move_extent_fail2(struct data_update *m, prt_str(&buf, msg); if (insert) { - i = 0; + const union bch_extent_entry *entry; + struct bch_extent_ptr *ptr; + struct extent_ptr_decoded p; + + unsigned ptr_bit = 1; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) { - if (((1U << i) & m->data_opts.rewrite_ptrs) && + if ((ptr_bit & m->data_opts.rewrite_ptrs) && (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) && !ptr->cached) - rewrites_found |= 1U << i; - i++; + rewrites_found |= ptr_bit; + ptr_bit <<= 1; } } - prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u", - (m->data_opts.rewrite_ptrs & (1 << 0)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 1)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 2)) != 0, - (m->data_opts.rewrite_ptrs & (1 << 3)) != 0); + prt_str(&buf, "rewrites found:\t"); + bch2_prt_u64_base2(&buf, rewrites_found); + prt_newline(&buf); - prt_printf(&buf, "\nrewrites found: %u%u%u%u", - (rewrites_found & (1 << 0)) != 0, - (rewrites_found & (1 << 1)) != 0, - (rewrites_found & (1 << 2)) != 0, - (rewrites_found & (1 << 3)) != 0); + bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts); prt_str(&buf, "\nold: "); bch2_bkey_val_to_text(&buf, c, old); @@ -194,7 +202,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, struct bpos next_pos; bool should_check_enospc; s64 i_sectors_delta = 0, disk_sectors_delta = 0; - unsigned rewrites_found = 0, durability, i; + unsigned rewrites_found = 0, durability, ptr_bit; bch2_trans_begin(trans); @@ -231,16 +239,16 @@ static int __bch2_data_update_index_update(struct btree_trans *trans, * * Fist, drop rewrite_ptrs from @new: */ - i = 0; + ptr_bit = 1; bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) { - if (((1U << i) & m->data_opts.rewrite_ptrs) && + if ((ptr_bit & m->data_opts.rewrite_ptrs) && (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) && !ptr->cached) { bch2_extent_ptr_set_cached(c, &m->op.opts, bkey_i_to_s(insert), ptr); - rewrites_found |= 1U << i; + rewrites_found |= ptr_bit; } - i++; + ptr_bit <<= 1; } if (m->data_opts.rewrite_ptrs && @@ -323,8 +331,11 @@ restart_drop_extra_replicas: * it's been hard to reproduce, so this should give us some more * information when it does occur: */ - int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), - BCH_VALIDATE_commit); + int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), + (struct bkey_validate_context) { + .btree = m->btree_id, + .flags = BCH_VALIDATE_commit, + }); if (invalid) { struct 
printbuf buf = PRINTBUF; @@ -362,7 +373,7 @@ restart_drop_extra_replicas: k.k->p, bkey_start_pos(&insert->k)) ?: bch2_insert_snapshot_whiteouts(trans, m->btree_id, k.k->p, insert->k.p) ?: - bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?: + bch2_bkey_set_needs_rebalance(c, &op->opts, insert) ?: bch2_trans_update(trans, &iter, insert, BTREE_UPDATE_internal_snapshot_node) ?: bch2_trans_commit(trans, &op->res, @@ -374,7 +385,8 @@ restart_drop_extra_replicas: bch2_btree_iter_set_pos(&iter, next_pos); this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size); - trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i)); + if (trace_move_extent_finish_enabled()) + trace_move_extent_finish2(m, &new->k_i, insert); } err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -527,34 +539,38 @@ void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c, struct data_update_opts *data_opts) { printbuf_tabstop_push(out, 20); - prt_str(out, "rewrite ptrs:\t"); + + prt_str_indented(out, "rewrite ptrs:\t"); bch2_prt_u64_base2(out, data_opts->rewrite_ptrs); prt_newline(out); - prt_str(out, "kill ptrs:\t"); + prt_str_indented(out, "kill ptrs:\t"); bch2_prt_u64_base2(out, data_opts->kill_ptrs); prt_newline(out); - prt_str(out, "target:\t"); + prt_str_indented(out, "target:\t"); bch2_target_to_text(out, c, data_opts->target); prt_newline(out); - prt_str(out, "compression:\t"); - bch2_compression_opt_to_text(out, background_compression(*io_opts)); + prt_str_indented(out, "compression:\t"); + bch2_compression_opt_to_text(out, io_opts->background_compression); prt_newline(out); - prt_str(out, "opts.replicas:\t"); + prt_str_indented(out, "opts.replicas:\t"); prt_u64(out, io_opts->data_replicas); + prt_newline(out); - prt_str(out, "extra replicas:\t"); + prt_str_indented(out, "extra replicas:\t"); prt_u64(out, data_opts->extra_replicas); } void bch2_data_update_to_text(struct printbuf *out, struct data_update *m) { - bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k)); - prt_newline(out); bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts); + prt_newline(out); + + prt_str_indented(out, "old key:\t"); + bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k)); } int bch2_extent_drop_ptrs(struct btree_trans *trans, @@ -614,7 +630,7 @@ int bch2_data_update_init(struct btree_trans *trans, struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; struct extent_ptr_decoded p; - unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas; + unsigned reserve_sectors = k.k->size * data_opts.extra_replicas; int ret = 0; /* @@ -622,7 +638,7 @@ int bch2_data_update_init(struct btree_trans *trans, * and we have to check for this because we go rw before repairing the * snapshots table - just skip it, we can move it later. 
*/ - if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot))) + if (unlikely(k.k->p.snapshot && !bch2_snapshot_exists(c, k.k->p.snapshot))) return -BCH_ERR_data_update_done; if (!bkey_get_dev_refs(c, k)) @@ -652,22 +668,22 @@ int bch2_data_update_init(struct btree_trans *trans, BCH_WRITE_DATA_ENCODED| BCH_WRITE_MOVE| m->data_opts.write_flags; - m->op.compression_opt = background_compression(io_opts); + m->op.compression_opt = io_opts.background_compression; m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK; unsigned durability_have = 0, durability_removing = 0; - i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { if (!p.ptr.cached) { rcu_read_lock(); - if (BIT(i) & m->data_opts.rewrite_ptrs) { + if (ptr_bit & m->data_opts.rewrite_ptrs) { if (crc_is_compressed(p.crc)) reserve_sectors += k.k->size; m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p); durability_removing += bch2_extent_ptr_desired_durability(c, &p); - } else if (!(BIT(i) & m->data_opts.kill_ptrs)) { + } else if (!(ptr_bit & m->data_opts.kill_ptrs)) { bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev); durability_have += bch2_extent_ptr_durability(c, &p); } @@ -687,7 +703,7 @@ int bch2_data_update_init(struct btree_trans *trans, if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) m->op.incompressible = true; - i++; + ptr_bit <<= 1; } unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have)); @@ -750,14 +766,14 @@ out: void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts) { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr(ptrs, ptr) { - if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) { - opts->kill_ptrs |= 1U << i; - opts->rewrite_ptrs ^= 1U << i; + if ((opts->rewrite_ptrs & ptr_bit) && ptr->cached) { + opts->kill_ptrs |= ptr_bit; + opts->rewrite_ptrs ^= ptr_bit; } - i++; + ptr_bit <<= 1; } } diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c index 45aec1afdb0e..55333e82d1fe 100644 --- a/fs/bcachefs/debug.c +++ b/fs/bcachefs/debug.c @@ -20,6 +20,7 @@ #include "extents.h" #include "fsck.h" #include "inode.h" +#include "journal_reclaim.h" #include "super.h" #include <linux/console.h> @@ -472,7 +473,9 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs * if (!out->nr_tabstops) printbuf_tabstop_push(out, 32); - prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level); + prt_printf(out, "%px ", b); + bch2_btree_id_level_to_text(out, b->c.btree_id, b->c.level); + prt_printf(out, "\n"); printbuf_indent_add(out, 2); diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c index faffc98d5605..600eee936f13 100644 --- a/fs/bcachefs/dirent.c +++ b/fs/bcachefs/dirent.c @@ -101,7 +101,7 @@ const struct bch_hash_desc bch2_dirent_hash_desc = { }; int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); struct qstr d_name = bch2_dirent_get_name(d); @@ -120,7 +120,7 @@ int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k, * Check new keys don't exceed the max length * (older keys may be larger.) 
*/ - bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, + bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, c, dirent_name_too_long, "dirent name too big (%u > %u)", d_name.len, BCH_NAME_MAX); @@ -266,7 +266,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir, } else { target->subvol = le32_to_cpu(d.v->d_child_subvol); - ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s); + ret = bch2_subvolume_get(trans, target->subvol, true, &s); target->inum = le64_to_cpu(s.inode); } @@ -500,7 +500,7 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32 struct bkey_s_c k; int ret; - for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents, + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_dirents, SPOS(dir, 0, snapshot), POS(dir, U64_MAX), 0, k, ret) if (k.k->type == KEY_TYPE_dirent) { @@ -549,7 +549,7 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) bch2_bkey_buf_init(&sk); int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_dirents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_dirents, POS(inum.inum, ctx->pos), POS(inum.inum, U64_MAX), inum.subvol, 0, k, ({ diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h index 53ad99666022..a633f83c1ac7 100644 --- a/fs/bcachefs/dirent.h +++ b/fs/bcachefs/dirent.h @@ -4,10 +4,10 @@ #include "str_hash.h" -enum bch_validate_flags; extern const struct bch_hash_desc bch2_dirent_hash_desc; -int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_dirent ((struct bkey_ops) { \ @@ -31,6 +31,11 @@ static inline unsigned dirent_val_u64s(unsigned len) sizeof(u64)); } +static inline unsigned int dirent_occupied_size(const struct qstr *name) +{ + return (BKEY_U64s + dirent_val_u64s(name->len)) * sizeof(u64); +} + int bch2_dirent_read_target(struct btree_trans *, subvol_inum, struct bkey_s_c_dirent, subvol_inum *); diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c index 07eb8fa1b026..b32e91ba8be8 100644 --- a/fs/bcachefs/disk_accounting.c +++ b/fs/bcachefs/disk_accounting.c @@ -79,6 +79,8 @@ static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_ memcpy_u64s_small(acc->v.d, d, nr); } +static int bch2_accounting_update_sb_one(struct bch_fs *, struct bpos); + int bch2_disk_accounting_mod(struct btree_trans *trans, struct disk_accounting_pos *k, s64 *d, unsigned nr, bool gc) @@ -96,9 +98,16 @@ int bch2_disk_accounting_mod(struct btree_trans *trans, accounting_key_init(&k_i.k, k, d, nr); - return likely(!gc) - ? 
bch2_trans_update_buffered(trans, BTREE_ID_accounting, &k_i.k) - : bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true); + if (unlikely(gc)) { + int ret = bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true); + if (ret == -BCH_ERR_btree_insert_need_mark_replicas) + ret = drop_locks_do(trans, + bch2_accounting_update_sb_one(trans->c, disk_accounting_pos_to_bpos(k))) ?: + bch2_accounting_mem_add(trans, bkey_i_to_s_c_accounting(&k_i.k), true); + return ret; + } else { + return bch2_trans_update_buffered(trans, BTREE_ID_accounting, &k_i.k); + } } int bch2_mod_dev_cached_sectors(struct btree_trans *trans, @@ -127,14 +136,15 @@ static inline bool is_zero(char *start, char *end) #define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member)) int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct disk_accounting_pos acc_k; bpos_to_disk_accounting_pos(&acc_k, k.k->p); void *end = &acc_k + 1; int ret = 0; - bkey_fsck_err_on(bversion_zero(k.k->bversion), + bkey_fsck_err_on((from.flags & BCH_VALIDATE_commit) && + bversion_zero(k.k->bversion), c, accounting_key_version_0, "accounting key with version=0"); @@ -217,7 +227,8 @@ void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_po prt_printf(out, "id=%u", k->snapshot.id); break; case BCH_DISK_ACCOUNTING_btree: - prt_printf(out, "btree=%s", bch2_btree_id_str(k->btree.id)); + prt_str(out, "btree="); + bch2_btree_id_to_text(out, k->btree.id); break; } } @@ -243,10 +254,10 @@ void bch2_accounting_swab(struct bkey_s k) } static inline void __accounting_to_replicas(struct bch_replicas_entry_v1 *r, - struct disk_accounting_pos acc) + struct disk_accounting_pos *acc) { - unsafe_memcpy(r, &acc.replicas, - replicas_entry_bytes(&acc.replicas), + unsafe_memcpy(r, &acc->replicas, + replicas_entry_bytes(&acc->replicas), "variable length struct"); } @@ -257,7 +268,7 @@ static inline bool accounting_to_replicas(struct bch_replicas_entry_v1 *r, struc switch (acc_k.type) { case BCH_DISK_ACCOUNTING_replicas: - __accounting_to_replicas(r, acc_k); + __accounting_to_replicas(r, &acc_k); return true; default: return false; @@ -322,6 +333,14 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, NULL); + + if (trace_accounting_mem_insert_enabled()) { + struct printbuf buf = PRINTBUF; + + bch2_accounting_to_text(&buf, c, a.s_c); + trace_accounting_mem_insert(c, buf.buf); + printbuf_exit(&buf); + } return 0; err: free_percpu(n.v[1]); @@ -461,32 +480,6 @@ int bch2_fs_accounting_read(struct bch_fs *c, darray_char *out_buf, unsigned acc return ret; } -void bch2_fs_accounting_to_text(struct printbuf *out, struct bch_fs *c) -{ - struct bch_accounting_mem *acc = &c->accounting; - - percpu_down_read(&c->mark_lock); - out->atomic++; - - eytzinger0_for_each(i, acc->k.nr) { - struct disk_accounting_pos acc_k; - bpos_to_disk_accounting_pos(&acc_k, acc->k.data[i].pos); - - bch2_accounting_key_to_text(out, &acc_k); - - u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; - bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false); - - prt_str(out, ":"); - for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++) - prt_printf(out, " %llu", v[j]); - prt_newline(out); - } - - --out->atomic; - percpu_up_read(&c->mark_lock); -} - static void bch2_accounting_free_counters(struct bch_accounting_mem *acc, bool gc) { darray_for_each(acc->k, 
e) { @@ -625,7 +618,7 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans, switch (acc.type) { case BCH_DISK_ACCOUNTING_replicas: { struct bch_replicas_padded r; - __accounting_to_replicas(&r.e, acc); + __accounting_to_replicas(&r.e, &acc); for (unsigned i = 0; i < r.e.nr_devs; i++) if (r.e.devs[i] != BCH_SB_MEMBER_INVALID && @@ -699,11 +692,45 @@ int bch2_accounting_read(struct bch_fs *c) struct btree_trans *trans = bch2_trans_get(c); struct printbuf buf = PRINTBUF; - int ret = for_each_btree_key(trans, iter, - BTREE_ID_accounting, POS_MIN, + /* + * We might run more than once if we rewind to start topology repair or + * btree node scan - and those might cause us to get different results, + * so we can't just skip if we've already run. + * + * Instead, zero out any accounting we have: + */ + percpu_down_write(&c->mark_lock); + darray_for_each(acc->k, e) + percpu_memset(e->v[0], 0, sizeof(u64) * e->nr_counters); + for_each_member_device(c, ca) + percpu_memset(ca->usage, 0, sizeof(*ca->usage)); + percpu_memset(c->usage, 0, sizeof(*c->usage)); + percpu_up_write(&c->mark_lock); + + struct btree_iter iter; + bch2_trans_iter_init(trans, &iter, BTREE_ID_accounting, POS_MIN, + BTREE_ITER_prefetch|BTREE_ITER_all_snapshots); + iter.flags &= ~BTREE_ITER_with_journal; + int ret = for_each_btree_key_continue(trans, iter, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({ struct bkey u; struct bkey_s_c k = bch2_btree_path_peek_slot_exact(btree_iter_path(trans, &iter), &u); + + if (k.k->type != KEY_TYPE_accounting) + continue; + + struct disk_accounting_pos acc_k; + bpos_to_disk_accounting_pos(&acc_k, k.k->p); + + if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) + break; + + if (!bch2_accounting_is_mem(acc_k)) { + struct disk_accounting_pos next = { .type = acc_k.type + 1 }; + bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next)); + continue; + } + accounting_read_key(trans, k); })); if (ret) @@ -715,6 +742,12 @@ int bch2_accounting_read(struct bch_fs *c) darray_for_each(*keys, i) { if (i->k->k.type == KEY_TYPE_accounting) { + struct disk_accounting_pos acc_k; + bpos_to_disk_accounting_pos(&acc_k, i->k->k.p); + + if (!bch2_accounting_is_mem(acc_k)) + continue; + struct bkey_s_c k = bkey_i_to_s_c(i->k); unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), @@ -748,15 +781,16 @@ int bch2_accounting_read(struct bch_fs *c) keys->gap = keys->nr = dst - keys->data; percpu_down_write(&c->mark_lock); - unsigned i = 0; - while (i < acc->k.nr) { - unsigned idx = inorder_to_eytzinger0(i, acc->k.nr); + darray_for_each_reverse(acc->k, i) { struct disk_accounting_pos acc_k; - bpos_to_disk_accounting_pos(&acc_k, acc->k.data[idx].pos); + bpos_to_disk_accounting_pos(&acc_k, i->pos); u64 v[BCH_ACCOUNTING_MAX_COUNTERS]; - bch2_accounting_mem_read_counters(acc, idx, v, ARRAY_SIZE(v), false); + memset(v, 0, sizeof(v)); + + for (unsigned j = 0; j < i->nr_counters; j++) + v[j] = percpu_u64_get(i->v[0] + j); /* * If the entry counters are zeroed, it should be treated as @@ -765,26 +799,25 @@ int bch2_accounting_read(struct bch_fs *c) * Remove it, so that if it's re-added it gets re-marked in the * superblock: */ - ret = bch2_is_zero(v, sizeof(v[0]) * acc->k.data[idx].nr_counters) + ret = bch2_is_zero(v, sizeof(v[0]) * i->nr_counters) ? 
-BCH_ERR_remove_disk_accounting_entry - : bch2_disk_accounting_validate_late(trans, acc_k, - v, acc->k.data[idx].nr_counters); + : bch2_disk_accounting_validate_late(trans, acc_k, v, i->nr_counters); if (ret == -BCH_ERR_remove_disk_accounting_entry) { - free_percpu(acc->k.data[idx].v[0]); - free_percpu(acc->k.data[idx].v[1]); - darray_remove_item(&acc->k, &acc->k.data[idx]); - eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), - accounting_pos_cmp, NULL); + free_percpu(i->v[0]); + free_percpu(i->v[1]); + darray_remove_item(&acc->k, i); ret = 0; continue; } if (ret) goto fsck_err; - i++; } + eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), + accounting_pos_cmp, NULL); + preempt_disable(); struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage); @@ -804,7 +837,7 @@ int bch2_accounting_read(struct bch_fs *c) break; case BCH_DISK_ACCOUNTING_dev_data_type: rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev); if (ca) { struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type]; percpu_u64_set(&d->buckets, v[0]); @@ -881,10 +914,13 @@ void bch2_verify_accounting_clean(struct bch_fs *c) bpos_to_disk_accounting_pos(&acc_k, k.k->p); if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR) - continue; + break; - if (acc_k.type == BCH_DISK_ACCOUNTING_inum) + if (!bch2_accounting_is_mem(acc_k)) { + struct disk_accounting_pos next = { .type = acc_k.type + 1 }; + bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next)); continue; + } bch2_accounting_mem_read(c, k.k->p, v, nr); @@ -910,7 +946,7 @@ void bch2_verify_accounting_clean(struct bch_fs *c) break; case BCH_DISK_ACCOUNTING_dev_data_type: { rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); if (!ca) { rcu_read_unlock(); continue; diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h index 4ea6c8a092bc..f4372cafea2e 100644 --- a/fs/bcachefs/disk_accounting.h +++ b/fs/bcachefs/disk_accounting.h @@ -2,6 +2,7 @@ #ifndef _BCACHEFS_DISK_ACCOUNTING_H #define _BCACHEFS_DISK_ACCOUNTING_H +#include "btree_update.h" #include "eytzinger.h" #include "sb-members.h" @@ -62,27 +63,32 @@ static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage static inline void bpos_to_disk_accounting_pos(struct disk_accounting_pos *acc, struct bpos p) { - acc->_pad = p; + BUILD_BUG_ON(sizeof(*acc) != sizeof(p)); + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - bch2_bpos_swab(&acc->_pad); + acc->_pad = p; +#else + memcpy_swab(acc, &p, sizeof(p)); #endif } -static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *k) +static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *acc) { - struct bpos ret = k->_pad; - + struct bpos p; #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - bch2_bpos_swab(&ret); + p = acc->_pad; +#else + memcpy_swab(&p, acc, sizeof(p)); #endif - return ret; + return p; } int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *, s64 *, unsigned, bool); int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool); -int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_accounting_key_to_text(struct printbuf *, struct disk_accounting_pos *); void 
bch2_accounting_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_accounting_swab(struct bkey_s); @@ -112,6 +118,12 @@ enum bch_accounting_mode { int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode); void bch2_accounting_mem_gc(struct bch_fs *); +static inline bool bch2_accounting_is_mem(struct disk_accounting_pos acc) +{ + return acc.type < BCH_DISK_ACCOUNTING_TYPE_NR && + acc.type != BCH_DISK_ACCOUNTING_inum; +} + /* * Update in memory counters so they match the btree update we're doing; called * from transaction commit path @@ -126,9 +138,10 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, bpos_to_disk_accounting_pos(&acc_k, a.k->p); bool gc = mode == BCH_ACCOUNTING_gc; - EBUG_ON(gc && !acc->gc_running); + if (gc && !acc->gc_running) + return 0; - if (acc_k.type == BCH_DISK_ACCOUNTING_inum) + if (!bch2_accounting_is_mem(acc_k)) return 0; if (mode == BCH_ACCOUNTING_normal) { @@ -141,7 +154,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, break; case BCH_DISK_ACCOUNTING_dev_data_type: rcu_read_lock(); - struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev); + struct bch_dev *ca = bch2_dev_rcu_noerror(c, acc_k.dev_data_type.dev); if (ca) { this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]); this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]); @@ -197,16 +210,54 @@ static inline void bch2_accounting_mem_read_counters(struct bch_accounting_mem * static inline void bch2_accounting_mem_read(struct bch_fs *c, struct bpos p, u64 *v, unsigned nr) { + percpu_down_read(&c->mark_lock); struct bch_accounting_mem *acc = &c->accounting; unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), accounting_pos_cmp, &p); bch2_accounting_mem_read_counters(acc, idx, v, nr, false); + percpu_up_read(&c->mark_lock); +} + +static inline struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset) +{ + EBUG_ON(!res->ref); + + return (struct bversion) { + .hi = res->seq >> 32, + .lo = (res->seq << 32) | (res->offset + offset), + }; +} + +static inline int bch2_accounting_trans_commit_hook(struct btree_trans *trans, + struct bkey_i_accounting *a, + unsigned commit_flags) +{ + a->k.bversion = journal_pos_to_bversion(&trans->journal_res, + (u64 *) a - (u64 *) trans->journal_entries); + + EBUG_ON(bversion_zero(a->k.bversion)); + + return likely(!(commit_flags & BCH_TRANS_COMMIT_skip_accounting_apply)) + ? 
bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal) + : 0; +} + +static inline void bch2_accounting_trans_commit_revert(struct btree_trans *trans, + struct bkey_i_accounting *a_i, + unsigned commit_flags) +{ + if (likely(!(commit_flags & BCH_TRANS_COMMIT_skip_accounting_apply))) { + struct bkey_s_accounting a = accounting_i_to_s(a_i); + + bch2_accounting_neg(a); + bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal); + bch2_accounting_neg(a); + } } int bch2_fs_replicas_usage_read(struct bch_fs *, darray_char *); int bch2_fs_accounting_read(struct bch_fs *, darray_char *, unsigned); -void bch2_fs_accounting_to_text(struct printbuf *, struct bch_fs *); int bch2_gc_accounting_start(struct bch_fs *); int bch2_gc_accounting_done(struct bch_fs *); diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c index 6094afb0c6be..d2a5e76e6479 100644 --- a/fs/bcachefs/ec.c +++ b/fs/bcachefs/ec.c @@ -26,6 +26,7 @@ #include "util.h" #include <linux/sort.h> +#include <linux/string_choices.h> #ifdef __KERNEL__ @@ -109,7 +110,7 @@ struct ec_bio { /* Stripes btree keys: */ int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; int ret = 0; @@ -129,7 +130,7 @@ int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k, "invalid csum granularity (%u >= 64)", s->csum_granularity_bits); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -304,13 +305,12 @@ static int mark_stripe_bucket(struct btree_trans *trans, } if (flags & BTREE_TRIGGER_gc) { - percpu_down_read(&c->mark_lock); struct bucket *g = gc_bucket(ca, bucket.offset); if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s", ptr->dev, (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) { ret = -BCH_ERR_mark_stripe; - goto err_unlock; + goto err; } bucket_lock(g); @@ -318,8 +318,7 @@ static int mark_stripe_bucket(struct btree_trans *trans, ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags); alloc_to_bucket(g, new); bucket_unlock(g); -err_unlock: - percpu_up_read(&c->mark_lock); + if (!ret) ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags); } @@ -732,7 +731,7 @@ static void ec_block_endio(struct bio *bio) ? BCH_MEMBER_ERROR_write : BCH_MEMBER_ERROR_read, "erasure coding %s error: %s", - bio_data_dir(bio) ? 
"write" : "read", + str_write_read(bio_data_dir(bio)), bch2_blk_status_to_str(bio->bi_status))) clear_bit(ec_bio->idx, ec_bio->buf->valid); @@ -909,7 +908,7 @@ err: bch2_bkey_val_to_text(&msgbuf, c, orig_k); bch_err_ratelimited(c, "error doing reconstruct read: %s\n %s", msg, msgbuf.buf); - printbuf_exit(&msgbuf);; + printbuf_exit(&msgbuf); ret = -BCH_ERR_stripe_reconstruct; goto out; } @@ -1266,11 +1265,11 @@ static int ec_stripe_update_extent(struct btree_trans *trans, struct bch_dev *ca, struct bpos bucket, u8 gen, struct ec_stripe_buf *s, - struct bpos *bp_pos) + struct bkey_s_c_backpointer bp, + struct bkey_buf *last_flushed) { struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; struct bch_fs *c = trans->c; - struct bch_backpointer bp; struct btree_iter iter; struct bkey_s_c k; const struct bch_extent_ptr *ptr_c; @@ -1279,33 +1278,26 @@ static int ec_stripe_update_extent(struct btree_trans *trans, struct bkey_i *n; int ret, dev, block; - ret = bch2_get_next_backpointer(trans, ca, bucket, gen, - bp_pos, &bp, BTREE_ITER_cached); - if (ret) - return ret; - if (bpos_eq(*bp_pos, SPOS_MAX)) - return 0; - - if (bp.level) { + if (bp.v->level) { struct printbuf buf = PRINTBUF; struct btree_iter node_iter; struct btree *b; - b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp); + b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed); bch2_trans_iter_exit(trans, &node_iter); if (!b) return 0; prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b); - bch2_backpointer_to_text(&buf, &bp); + bch2_bkey_val_to_text(&buf, c, bp.s_c); bch2_fs_inconsistent(c, "%s", buf.buf); printbuf_exit(&buf); return -EIO; } - k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent); + k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed); ret = bkey_err(k); if (ret) return ret; @@ -1364,7 +1356,6 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b struct bch_fs *c = trans->c; struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; struct bch_extent_ptr ptr = v->ptrs[block]; - struct bpos bp_pos = POS_MIN; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev); @@ -1373,19 +1364,27 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr); - while (1) { - ret = commit_do(trans, NULL, NULL, - BCH_TRANS_COMMIT_no_check_rw| - BCH_TRANS_COMMIT_no_enospc, - ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos)); - if (ret) - break; - if (bkey_eq(bp_pos, POS_MAX)) + struct bkey_buf last_flushed; + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); + + ret = for_each_btree_key_max_commit(trans, bp_iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, bucket_pos), + bucket_pos_to_bp_end(ca, bucket_pos), 0, bp_k, + NULL, NULL, + BCH_TRANS_COMMIT_no_check_rw| + BCH_TRANS_COMMIT_no_enospc, ({ + if (bkey_ge(bp_k.k->p, bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket_pos), 0))) break; - bp_pos = bpos_nosnap_successor(bp_pos); - } + if (bp_k.k->type != KEY_TYPE_backpointer) + continue; + ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, + bkey_s_c_to_backpointer(bp_k), &last_flushed); + })); + + bch2_bkey_buf_exit(&last_flushed, c); bch2_dev_put(ca); return ret; } @@ -1707,7 +1706,7 @@ static void ec_stripe_key_init(struct bch_fs *c, set_bkey_val_u64s(&s->k, u64s); } -static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) +static struct ec_stripe_new 
*ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) { struct ec_stripe_new *s; @@ -1715,7 +1714,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) s = kzalloc(sizeof(*s), GFP_KERNEL); if (!s) - return -BCH_ERR_ENOMEM_ec_new_stripe_alloc; + return NULL; mutex_init(&s->lock); closure_init(&s->iodone, NULL); @@ -1730,10 +1729,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h) ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data, s->nr_parity, h->blocksize, h->disk_label); - - h->s = s; - h->nr_created++; - return 0; + return s; } static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h) @@ -1878,25 +1874,26 @@ err: return h; } -static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h, +static int new_stripe_alloc_buckets(struct btree_trans *trans, + struct ec_stripe_head *h, struct ec_stripe_new *s, enum bch_watermark watermark, struct closure *cl) { struct bch_fs *c = trans->c; struct bch_devs_mask devs = h->devs; struct open_bucket *ob; struct open_buckets buckets; - struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; + struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v; unsigned i, j, nr_have_parity = 0, nr_have_data = 0; bool have_cache = true; int ret = 0; - BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity); - BUG_ON(v->nr_redundant != h->s->nr_parity); + BUG_ON(v->nr_blocks != s->nr_data + s->nr_parity); + BUG_ON(v->nr_redundant != s->nr_parity); /* * We bypass the sector allocator which normally does this: */ bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX); - for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) { + for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) { /* * Note: we don't yet repair invalid blocks (failed/removed * devices) when reusing stripes - we still need a codepath to @@ -1906,21 +1903,21 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID) __clear_bit(v->ptrs[i].dev, devs.d); - if (i < h->s->nr_data) + if (i < s->nr_data) nr_have_data++; else nr_have_parity++; } - BUG_ON(nr_have_data > h->s->nr_data); - BUG_ON(nr_have_parity > h->s->nr_parity); + BUG_ON(nr_have_data > s->nr_data); + BUG_ON(nr_have_parity > s->nr_parity); buckets.nr = 0; - if (nr_have_parity < h->s->nr_parity) { + if (nr_have_parity < s->nr_parity) { ret = bch2_bucket_alloc_set_trans(trans, &buckets, &h->parity_stripe, &devs, - h->s->nr_parity, + s->nr_parity, &nr_have_parity, &have_cache, 0, BCH_DATA_parity, @@ -1928,14 +1925,14 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ cl); open_bucket_for_each(c, &buckets, ob, i) { - j = find_next_zero_bit(h->s->blocks_gotten, - h->s->nr_data + h->s->nr_parity, - h->s->nr_data); - BUG_ON(j >= h->s->nr_data + h->s->nr_parity); + j = find_next_zero_bit(s->blocks_gotten, + s->nr_data + s->nr_parity, + s->nr_data); + BUG_ON(j >= s->nr_data + s->nr_parity); - h->s->blocks[j] = buckets.v[i]; + s->blocks[j] = buckets.v[i]; v->ptrs[j] = bch2_ob_ptr(c, ob); - __set_bit(j, h->s->blocks_gotten); + __set_bit(j, s->blocks_gotten); } if (ret) @@ -1943,11 +1940,11 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ } buckets.nr = 0; - if (nr_have_data < h->s->nr_data) { + if (nr_have_data < s->nr_data) { ret = bch2_bucket_alloc_set_trans(trans, &buckets, &h->block_stripe, &devs, - h->s->nr_data, + s->nr_data, &nr_have_data, &have_cache, 0, 
BCH_DATA_user, @@ -1955,13 +1952,13 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_ cl); open_bucket_for_each(c, &buckets, ob, i) { - j = find_next_zero_bit(h->s->blocks_gotten, - h->s->nr_data, 0); - BUG_ON(j >= h->s->nr_data); + j = find_next_zero_bit(s->blocks_gotten, + s->nr_data, 0); + BUG_ON(j >= s->nr_data); - h->s->blocks[j] = buckets.v[i]; + s->blocks[j] = buckets.v[i]; v->ptrs[j] = bch2_ob_ptr(c, ob); - __set_bit(j, h->s->blocks_gotten); + __set_bit(j, s->blocks_gotten); } if (ret) @@ -2007,73 +2004,78 @@ static s64 get_existing_stripe(struct bch_fs *c, return ret; } -static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h) +static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s) { - struct bch_fs *c = trans->c; - struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v; - struct bch_stripe *existing_v; + struct bch_stripe *new_v = &bkey_i_to_stripe(&s->new_stripe.key)->v; + struct bch_stripe *existing_v = &bkey_i_to_stripe(&s->existing_stripe.key)->v; unsigned i; - s64 idx; - int ret; - - /* - * If we can't allocate a new stripe, and there's no stripes with empty - * blocks for us to reuse, that means we have to wait on copygc: - */ - idx = get_existing_stripe(c, h); - if (idx < 0) - return -BCH_ERR_stripe_alloc_blocked; - - ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe); - bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c, - "reading stripe key: %s", bch2_err_str(ret)); - if (ret) { - bch2_stripe_close(c, h->s); - return ret; - } - existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v; - - BUG_ON(existing_v->nr_redundant != h->s->nr_parity); - h->s->nr_data = existing_v->nr_blocks - + BUG_ON(existing_v->nr_redundant != s->nr_parity); + s->nr_data = existing_v->nr_blocks - existing_v->nr_redundant; - ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize); + int ret = ec_stripe_buf_init(&s->existing_stripe, 0, le16_to_cpu(existing_v->sectors)); if (ret) { - bch2_stripe_close(c, h->s); + bch2_stripe_close(c, s); return ret; } - BUG_ON(h->s->existing_stripe.size != h->blocksize); - BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors)); + BUG_ON(s->existing_stripe.size != le16_to_cpu(existing_v->sectors)); /* * Free buckets we initially allocated - they might conflict with * blocks from the stripe we're reusing: */ - for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) { - bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]); - h->s->blocks[i] = 0; + for_each_set_bit(i, s->blocks_gotten, new_v->nr_blocks) { + bch2_open_bucket_put(c, c->open_buckets + s->blocks[i]); + s->blocks[i] = 0; } - memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten)); - memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated)); + memset(s->blocks_gotten, 0, sizeof(s->blocks_gotten)); + memset(s->blocks_allocated, 0, sizeof(s->blocks_allocated)); - for (i = 0; i < existing_v->nr_blocks; i++) { + for (unsigned i = 0; i < existing_v->nr_blocks; i++) { if (stripe_blockcount_get(existing_v, i)) { - __set_bit(i, h->s->blocks_gotten); - __set_bit(i, h->s->blocks_allocated); + __set_bit(i, s->blocks_gotten); + __set_bit(i, s->blocks_allocated); } - ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone); + ec_block_io(c, &s->existing_stripe, READ, i, &s->iodone); } - bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key); - h->s->have_existing_stripe = true; + 
bkey_copy(&s->new_stripe.key, &s->existing_stripe.key); + s->have_existing_stripe = true; return 0; } -static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h) +static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h, + struct ec_stripe_new *s) +{ + struct bch_fs *c = trans->c; + s64 idx; + int ret; + + /* + * If we can't allocate a new stripe, and there's no stripes with empty + * blocks for us to reuse, that means we have to wait on copygc: + */ + idx = get_existing_stripe(c, h); + if (idx < 0) + return -BCH_ERR_stripe_alloc_blocked; + + ret = get_stripe_key_trans(trans, idx, &s->existing_stripe); + bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c, + "reading stripe key: %s", bch2_err_str(ret)); + if (ret) { + bch2_stripe_close(c, s); + return ret; + } + + return init_new_stripe_from_existing(c, s); +} + +static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h, + struct ec_stripe_new *s) { struct bch_fs *c = trans->c; struct btree_iter iter; @@ -2082,15 +2084,19 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint)); int ret; - if (!h->s->res.sectors) { - ret = bch2_disk_reservation_get(c, &h->s->res, + if (!s->res.sectors) { + ret = bch2_disk_reservation_get(c, &s->res, h->blocksize, - h->s->nr_parity, + s->nr_parity, BCH_DISK_RESERVATION_NOFAIL); if (ret) return ret; } + /* + * Allocate stripe slot + * XXX: we're going to need a bitrange btree of free stripes + */ for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos, BTREE_ITER_slots|BTREE_ITER_intent, k, ret) { if (bkey_gt(k.k->p, POS(0, U32_MAX))) { @@ -2105,7 +2111,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st } if (bkey_deleted(k.k) && - bch2_try_open_stripe(c, h->s, k.k->p.offset)) + bch2_try_open_stripe(c, s, k.k->p.offset)) break; } @@ -2116,16 +2122,16 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st ret = ec_stripe_mem_alloc(trans, &iter); if (ret) { - bch2_stripe_close(c, h->s); + bch2_stripe_close(c, s); goto err; } - h->s->new_stripe.key.k.p = iter.pos; + s->new_stripe.key.k.p = iter.pos; out: bch2_trans_iter_exit(trans, &iter); return ret; err: - bch2_disk_reservation_put(c, &h->s->res); + bch2_disk_reservation_put(c, &s->res); goto out; } @@ -2156,22 +2162,27 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, return h; if (!h->s) { - ret = ec_new_stripe_alloc(c, h); - if (ret) { + h->s = ec_new_stripe_alloc(c, h); + if (!h->s) { + ret = -BCH_ERR_ENOMEM_ec_new_stripe_alloc; bch_err(c, "failed to allocate new stripe"); goto err; } + + h->nr_created++; } - if (h->s->allocated) + struct ec_stripe_new *s = h->s; + + if (s->allocated) goto allocated; - if (h->s->have_existing_stripe) + if (s->have_existing_stripe) goto alloc_existing; /* First, try to allocate a full stripe: */ - ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?: - __bch2_ec_stripe_head_reserve(trans, h); + ret = new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?: + __bch2_ec_stripe_head_reserve(trans, h, s); if (!ret) goto allocate_buf; if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || @@ -2183,15 +2194,15 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans, * existing stripe: */ while (1) { - ret = __bch2_ec_stripe_head_reuse(trans, h); + 
ret = __bch2_ec_stripe_head_reuse(trans, h, s); if (!ret) break; if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked) goto err; if (watermark == BCH_WATERMARK_copygc) { - ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?: - __bch2_ec_stripe_head_reserve(trans, h); + ret = new_stripe_alloc_buckets(trans, h, s, watermark, NULL) ?: + __bch2_ec_stripe_head_reserve(trans, h, s); if (ret) goto err; goto allocate_buf; @@ -2209,19 +2220,19 @@ alloc_existing: * Retry allocating buckets, with the watermark for this * particular write: */ - ret = new_stripe_alloc_buckets(trans, h, watermark, cl); + ret = new_stripe_alloc_buckets(trans, h, s, watermark, cl); if (ret) goto err; allocate_buf: - ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize); + ret = ec_stripe_buf_init(&s->new_stripe, 0, h->blocksize); if (ret) goto err; - h->s->allocated = true; + s->allocated = true; allocated: - BUG_ON(!h->s->idx); - BUG_ON(!h->s->new_stripe.data[0]); + BUG_ON(!s->idx); + BUG_ON(!s->new_stripe.data[0]); BUG_ON(trans->restarted); return h; err: @@ -2286,7 +2297,7 @@ err: int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx) { return bch2_trans_run(c, - for_each_btree_key_upto_commit(trans, iter, + for_each_btree_key_max_commit(trans, iter, BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX), BTREE_ITER_intent, k, NULL, NULL, 0, ({ @@ -2449,11 +2460,9 @@ void bch2_fs_ec_exit(struct bch_fs *c) while (1) { mutex_lock(&c->ec_stripe_head_lock); - h = list_first_entry_or_null(&c->ec_stripe_head_list, - struct ec_stripe_head, list); - if (h) - list_del(&h->list); + h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list); mutex_unlock(&c->ec_stripe_head_lock); + if (!h) break; diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h index 43326370b410..583ca6a226da 100644 --- a/fs/bcachefs/ec.h +++ b/fs/bcachefs/ec.h @@ -6,9 +6,8 @@ #include "buckets_types.h" #include "extents_types.h" -enum bch_validate_flags; - -int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_stripe_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned, diff --git a/fs/bcachefs/ec_format.h b/fs/bcachefs/ec_format.h index 64ef52e00078..b9770f24f213 100644 --- a/fs/bcachefs/ec_format.h +++ b/fs/bcachefs/ec_format.h @@ -20,6 +20,23 @@ struct bch_stripe { */ __u8 disk_label; + /* + * Variable length sections: + * - Pointers + * - Checksums + * 2D array of [stripe block/device][csum block], with checksum block + * size given by csum_granularity_bits + * - Block sector counts: per-block array of u16s + * + * XXX: + * Either checksums should have come last, or we should have included a + * checksum_size field (the size in bytes of the checksum itself, not + * the blocksize the checksum covers). + * + * Currently we aren't able to access the block sector counts if the + * checksum type is unknown. 
+ */ + struct bch_extent_ptr ptrs[]; } __packed __aligned(8); diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h index 9c4fe5cdbfb7..4590cd0c7c90 100644 --- a/fs/bcachefs/errcode.h +++ b/fs/bcachefs/errcode.h @@ -54,7 +54,8 @@ x(ENOMEM, ENOMEM_compression_bounce_read_init) \ x(ENOMEM, ENOMEM_compression_bounce_write_init) \ x(ENOMEM, ENOMEM_compression_workspace_init) \ - x(ENOMEM, ENOMEM_decompression_workspace_init) \ + x(ENOMEM, ENOMEM_backpointer_mismatches_bitmap) \ + x(EIO, compression_workspace_not_initialized) \ x(ENOMEM, ENOMEM_bucket_gens) \ x(ENOMEM, ENOMEM_buckets_nouse) \ x(ENOMEM, ENOMEM_usage_init) \ @@ -116,6 +117,8 @@ x(ENOENT, ENOENT_dirent_doesnt_match_inode) \ x(ENOENT, ENOENT_dev_not_found) \ x(ENOENT, ENOENT_dev_idx_not_found) \ + x(ENOENT, ENOENT_inode_no_backpointer) \ + x(ENOENT, ENOENT_no_snapshot_tree_subvol) \ x(ENOTEMPTY, ENOTEMPTY_dir_not_empty) \ x(ENOTEMPTY, ENOTEMPTY_subvol_not_empty) \ x(EEXIST, EEXIST_str_hash_set) \ @@ -148,6 +151,7 @@ x(BCH_ERR_transaction_restart, transaction_restart_split_race) \ x(BCH_ERR_transaction_restart, transaction_restart_write_buffer_flush) \ x(BCH_ERR_transaction_restart, transaction_restart_nested) \ + x(BCH_ERR_transaction_restart, transaction_restart_commit) \ x(0, no_btree_node) \ x(BCH_ERR_no_btree_node, no_btree_node_relock) \ x(BCH_ERR_no_btree_node, no_btree_node_upgrade) \ @@ -164,7 +168,6 @@ x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_res) \ x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_reclaim) \ x(0, backpointer_to_overwritten_btree_node) \ - x(0, lock_fail_root_changed) \ x(0, journal_reclaim_would_deadlock) \ x(EINVAL, fsck) \ x(BCH_ERR_fsck, fsck_fix) \ @@ -173,7 +176,9 @@ x(BCH_ERR_fsck, fsck_errors_not_fixed) \ x(BCH_ERR_fsck, fsck_repair_unimplemented) \ x(BCH_ERR_fsck, fsck_repair_impossible) \ - x(0, restart_recovery) \ + x(EINVAL, restart_recovery) \ + x(EINVAL, not_in_recovery) \ + x(EINVAL, cannot_rewind_recovery) \ x(0, data_update_done) \ x(EINVAL, device_state_not_allowed) \ x(EINVAL, member_info_missing) \ @@ -192,7 +197,9 @@ x(EINVAL, opt_parse_error) \ x(EINVAL, remove_with_metadata_missing_unimplemented)\ x(EINVAL, remove_would_lose_data) \ - x(EINVAL, btree_iter_with_journal_not_supported) \ + x(EINVAL, no_resize_with_buckets_nouse) \ + x(EINVAL, inode_unpack_error) \ + x(EINVAL, varint_decode_error) \ x(EROFS, erofs_trans_commit) \ x(EROFS, erofs_no_writes) \ x(EROFS, erofs_journal_err) \ @@ -241,7 +248,10 @@ x(BCH_ERR_invalid_sb, invalid_sb_downgrade) \ x(BCH_ERR_invalid, invalid_bkey) \ x(BCH_ERR_operation_blocked, nocow_lock_blocked) \ + x(EIO, journal_shutdown) \ + x(EIO, journal_flush_err) \ x(EIO, btree_node_read_err) \ + x(BCH_ERR_btree_node_read_err, btree_node_read_err_cached) \ x(EIO, sb_not_downgraded) \ x(EIO, btree_node_write_all_failed) \ x(EIO, btree_node_read_error) \ @@ -257,6 +267,8 @@ x(EIO, no_device_to_read_from) \ x(EIO, missing_indirect_extent) \ x(EIO, invalidate_stripe_to_dev) \ + x(EIO, no_encryption_key) \ + x(EIO, insufficient_journal_devices) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_fixable) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_want_retry) \ x(BCH_ERR_btree_node_read_err, btree_node_read_err_must_retry) \ @@ -305,6 +317,7 @@ static inline long bch2_err_class(long err) #define BLK_STS_REMOVED ((__force blk_status_t)128) +#include <linux/blk_types.h> const char *bch2_blk_status_to_str(blk_status_t); #endif /* _BCACHFES_ERRCODE_H */ diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c index 
b679def8fb98..038da6a61f6b 100644 --- a/fs/bcachefs/error.c +++ b/fs/bcachefs/error.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "btree_cache.h" #include "btree_iter.h" #include "error.h" +#include "fs-common.h" #include "journal.h" #include "recovery_passes.h" #include "super.h" @@ -33,7 +35,7 @@ bool bch2_inconsistent_error(struct bch_fs *c) int bch2_topology_error(struct bch_fs *c) { set_bit(BCH_FS_topology_error, &c->flags); - if (!test_bit(BCH_FS_fsck_running, &c->flags)) { + if (!test_bit(BCH_FS_recovery_running, &c->flags)) { bch2_inconsistent_error(c); return -BCH_ERR_btree_need_topology_repair; } else { @@ -218,6 +220,30 @@ static const u8 fsck_flags_extra[] = { #undef x }; +static int do_fsck_ask_yn(struct bch_fs *c, + struct btree_trans *trans, + struct printbuf *question, + const char *action) +{ + prt_str(question, ", "); + prt_str(question, action); + + if (bch2_fs_stdio_redirect(c)) + bch2_print(c, "%s", question->buf); + else + bch2_print_string_as_lines(KERN_ERR, question->buf); + + int ask = bch2_fsck_ask_yn(c, trans); + + if (trans) { + int ret = bch2_trans_relock(trans); + if (ret) + return ret; + } + + return ask; +} + int __bch2_fsck_err(struct bch_fs *c, struct btree_trans *trans, enum bch_fsck_flags flags, @@ -226,7 +252,7 @@ int __bch2_fsck_err(struct bch_fs *c, { struct fsck_err_state *s = NULL; va_list args; - bool print = true, suppressing = false, inconsistent = false; + bool print = true, suppressing = false, inconsistent = false, exiting = false; struct printbuf buf = PRINTBUF, *out = &buf; int ret = -BCH_ERR_fsck_ignore; const char *action_orig = "fix?", *action = action_orig; @@ -256,9 +282,10 @@ int __bch2_fsck_err(struct bch_fs *c, !trans && bch2_current_has_btree_trans(c)); - if ((flags & FSCK_CAN_FIX) && - test_bit(err, c->sb.errors_silent)) - return -BCH_ERR_fsck_fix; + if (test_bit(err, c->sb.errors_silent)) + return flags & FSCK_CAN_FIX + ? 
-BCH_ERR_fsck_fix + : -BCH_ERR_fsck_ignore; bch2_sb_error_count(c, err); @@ -289,16 +316,14 @@ int __bch2_fsck_err(struct bch_fs *c, */ if (s->last_msg && !strcmp(buf.buf, s->last_msg)) { ret = s->ret; - mutex_unlock(&c->fsck_error_msgs_lock); - goto err; + goto err_unlock; } kfree(s->last_msg); s->last_msg = kstrdup(buf.buf, GFP_KERNEL); if (!s->last_msg) { - mutex_unlock(&c->fsck_error_msgs_lock); ret = -ENOMEM; - goto err; + goto err_unlock; } if (c->opts.ratelimit_errors && @@ -318,13 +343,19 @@ int __bch2_fsck_err(struct bch_fs *c, prt_printf(out, bch2_log_msg(c, "")); #endif - if ((flags & FSCK_CAN_FIX) && - (flags & FSCK_AUTOFIX) && + if ((flags & FSCK_AUTOFIX) && (c->opts.errors == BCH_ON_ERROR_continue || c->opts.errors == BCH_ON_ERROR_fix_safe)) { prt_str(out, ", "); - prt_actioning(out, action); - ret = -BCH_ERR_fsck_fix; + if (flags & FSCK_CAN_FIX) { + prt_actioning(out, action); + ret = -BCH_ERR_fsck_fix; + } else { + prt_str(out, ", continuing"); + ret = -BCH_ERR_fsck_ignore; + } + + goto print; } else if (!test_bit(BCH_FS_fsck_running, &c->flags)) { if (c->opts.errors != BCH_ON_ERROR_continue || !(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) { @@ -348,31 +379,18 @@ int __bch2_fsck_err(struct bch_fs *c, : c->opts.fix_errors; if (fix == FSCK_FIX_ask) { - prt_str(out, ", "); - prt_str(out, action); - - if (bch2_fs_stdio_redirect(c)) - bch2_print(c, "%s", out->buf); - else - bch2_print_string_as_lines(KERN_ERR, out->buf); print = false; - int ask = bch2_fsck_ask_yn(c, trans); - - if (trans) { - ret = bch2_trans_relock(trans); - if (ret) { - mutex_unlock(&c->fsck_error_msgs_lock); - goto err; - } - } + ret = do_fsck_ask_yn(c, trans, out, action); + if (ret < 0) + goto err_unlock; - if (ask >= YN_ALLNO && s) - s->fix = ask == YN_ALLNO + if (ret >= YN_ALLNO && s) + s->fix = ret == YN_ALLNO ? FSCK_FIX_no : FSCK_FIX_yes; - ret = ask & 1 + ret = ret & 1 ? 
-BCH_ERR_fsck_fix : -BCH_ERR_fsck_ignore; } else if (fix == FSCK_FIX_yes || @@ -385,9 +403,7 @@ int __bch2_fsck_err(struct bch_fs *c, prt_str(out, ", not "); prt_actioning(out, action); } - } else if (flags & FSCK_NEED_FSCK) { - prt_str(out, " (run fsck to correct)"); - } else { + } else if (!(flags & FSCK_CAN_IGNORE)) { prt_str(out, " (repair unimplemented)"); } @@ -396,14 +412,13 @@ int __bch2_fsck_err(struct bch_fs *c, !(flags & FSCK_CAN_IGNORE))) ret = -BCH_ERR_fsck_errors_not_fixed; - bool exiting = - test_bit(BCH_FS_fsck_running, &c->flags) && - (ret != -BCH_ERR_fsck_fix && - ret != -BCH_ERR_fsck_ignore); - - if (exiting) + if (test_bit(BCH_FS_fsck_running, &c->flags) && + (ret != -BCH_ERR_fsck_fix && + ret != -BCH_ERR_fsck_ignore)) { + exiting = true; print = true; - + } +print: if (print) { if (bch2_fs_stdio_redirect(c)) bch2_print(c, "%s\n", out->buf); @@ -419,17 +434,24 @@ int __bch2_fsck_err(struct bch_fs *c, if (s) s->ret = ret; - mutex_unlock(&c->fsck_error_msgs_lock); - if (inconsistent) bch2_inconsistent_error(c); - if (ret == -BCH_ERR_fsck_fix) { - set_bit(BCH_FS_errors_fixed, &c->flags); - } else { - set_bit(BCH_FS_errors_not_fixed, &c->flags); - set_bit(BCH_FS_error, &c->flags); + /* + * We don't yet track whether the filesystem currently has errors, for + * log_fsck_err()s: that would require us to track for every error type + * which recovery pass corrects it, to get the fsck exit status correct: + */ + if (flags & FSCK_CAN_FIX) { + if (ret == -BCH_ERR_fsck_fix) { + set_bit(BCH_FS_errors_fixed, &c->flags); + } else { + set_bit(BCH_FS_errors_not_fixed, &c->flags); + set_bit(BCH_FS_error, &c->flags); + } } +err_unlock: + mutex_unlock(&c->fsck_error_msgs_lock); err: if (action != action_orig) kfree(action); @@ -437,28 +459,52 @@ err: return ret; } +static const char * const bch2_bkey_validate_contexts[] = { +#define x(n) #n, + BKEY_VALIDATE_CONTEXTS() +#undef x + NULL +}; + int __bch2_bkey_fsck_err(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags validate_flags, + struct bkey_validate_context from, enum bch_sb_error_id err, const char *fmt, ...) 
{ - if (validate_flags & BCH_VALIDATE_silent) + if (from.flags & BCH_VALIDATE_silent) return -BCH_ERR_fsck_delete_bkey; unsigned fsck_flags = 0; - if (!(validate_flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) + if (!(from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) { + if (test_bit(err, c->sb.errors_silent)) + return -BCH_ERR_fsck_delete_bkey; + fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX; + } + if (!WARN_ON(err >= ARRAY_SIZE(fsck_flags_extra))) + fsck_flags |= fsck_flags_extra[err]; struct printbuf buf = PRINTBUF; - va_list args; + prt_printf(&buf, "invalid bkey in %s", + bch2_bkey_validate_contexts[from.from]); + + if (from.from == BKEY_VALIDATE_journal) + prt_printf(&buf, " journal seq=%llu offset=%u", + from.journal_seq, from.journal_offset); + + prt_str(&buf, " btree="); + bch2_btree_id_to_text(&buf, from.btree); + prt_printf(&buf, " level=%u: ", from.level); - prt_str(&buf, "invalid bkey "); bch2_bkey_val_to_text(&buf, c, k); prt_str(&buf, "\n "); + + va_list args; va_start(args, fmt); prt_vprintf(&buf, fmt, args); va_end(args); + prt_str(&buf, ": delete?"); int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf); @@ -483,3 +529,36 @@ void bch2_flush_fsck_errs(struct bch_fs *c) mutex_unlock(&c->fsck_error_msgs_lock); } + +int bch2_inum_err_msg_trans(struct btree_trans *trans, struct printbuf *out, subvol_inum inum) +{ + u32 restart_count = trans->restart_count; + int ret = 0; + + /* XXX: we don't yet attempt to print paths when we don't know the subvol */ + if (inum.subvol) + ret = lockrestart_do(trans, bch2_inum_to_path(trans, inum, out)); + if (!inum.subvol || ret) + prt_printf(out, "inum %llu:%llu", inum.subvol, inum.inum); + + return trans_was_restarted(trans, restart_count); +} + +int bch2_inum_offset_err_msg_trans(struct btree_trans *trans, struct printbuf *out, + subvol_inum inum, u64 offset) +{ + int ret = bch2_inum_err_msg_trans(trans, out, inum); + prt_printf(out, " offset %llu: ", offset); + return ret; +} + +void bch2_inum_err_msg(struct bch_fs *c, struct printbuf *out, subvol_inum inum) +{ + bch2_trans_run(c, bch2_inum_err_msg_trans(trans, out, inum)); +} + +void bch2_inum_offset_err_msg(struct bch_fs *c, struct printbuf *out, + subvol_inum inum, u64 offset) +{ + bch2_trans_run(c, bch2_inum_offset_err_msg_trans(trans, out, inum, offset)); +} diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h index 6551ada926b6..7acf2a27ca28 100644 --- a/fs/bcachefs/error.h +++ b/fs/bcachefs/error.h @@ -45,32 +45,11 @@ int bch2_topology_error(struct bch_fs *); bch2_inconsistent_error(c); \ }) -#define bch2_fs_inconsistent_on(cond, c, ...) \ +#define bch2_fs_inconsistent_on(cond, ...) \ ({ \ bool _ret = unlikely(!!(cond)); \ - \ - if (_ret) \ - bch2_fs_inconsistent(c, __VA_ARGS__); \ - _ret; \ -}) - -/* - * Later we might want to mark only the particular device inconsistent, not the - * entire filesystem: - */ - -#define bch2_dev_inconsistent(ca, ...) \ -do { \ - bch_err(ca, __VA_ARGS__); \ - bch2_inconsistent_error((ca)->fs); \ -} while (0) - -#define bch2_dev_inconsistent_on(cond, ca, ...) \ -({ \ - bool _ret = unlikely(!!(cond)); \ - \ if (_ret) \ - bch2_dev_inconsistent(ca, __VA_ARGS__); \ + bch2_fs_inconsistent(__VA_ARGS__); \ _ret; \ }) @@ -123,9 +102,9 @@ int __bch2_fsck_err(struct bch_fs *, struct btree_trans *, void bch2_flush_fsck_errs(struct bch_fs *); -#define __fsck_err(c, _flags, _err_type, ...) 
\ +#define fsck_err_wrap(_do) \ ({ \ - int _ret = bch2_fsck_err(c, _flags, _err_type, __VA_ARGS__); \ + int _ret = _do; \ if (_ret != -BCH_ERR_fsck_fix && \ _ret != -BCH_ERR_fsck_ignore) { \ ret = _ret; \ @@ -135,6 +114,8 @@ void bch2_flush_fsck_errs(struct bch_fs *); _ret == -BCH_ERR_fsck_fix; \ }) +#define __fsck_err(...) fsck_err_wrap(bch2_fsck_err(__VA_ARGS__)) + /* These macros return true if error should be fixed: */ /* XXX: mark in superblock that filesystem contains errors, if we ignore: */ @@ -149,12 +130,6 @@ void bch2_flush_fsck_errs(struct bch_fs *); (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false);\ }) -#define need_fsck_err_on(cond, c, _err_type, ...) \ - __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) - -#define need_fsck_err(c, _err_type, ...) \ - __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) - #define mustfix_fsck_err(c, _err_type, ...) \ __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) @@ -167,11 +142,22 @@ void bch2_flush_fsck_errs(struct bch_fs *); #define fsck_err_on(cond, c, _err_type, ...) \ __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) +#define log_fsck_err(c, _err_type, ...) \ + __fsck_err(c, FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) + +#define log_fsck_err_on(cond, ...) \ +({ \ + bool _ret = unlikely(!!(cond)); \ + if (_ret) \ + log_fsck_err(__VA_ARGS__); \ + _ret; \ +}) + enum bch_validate_flags; __printf(5, 6) int __bch2_bkey_fsck_err(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags, + struct bkey_validate_context from, enum bch_sb_error_id, const char *, ...); @@ -181,7 +167,7 @@ int __bch2_bkey_fsck_err(struct bch_fs *, */ #define bkey_fsck_err(c, _err_type, _err_msg, ...) \ do { \ - int _ret = __bch2_bkey_fsck_err(c, k, flags, \ + int _ret = __bch2_bkey_fsck_err(c, k, from, \ BCH_FSCK_ERR_##_err_type, \ _err_msg, ##__VA_ARGS__); \ if (_ret != -BCH_ERR_fsck_fix && \ @@ -252,4 +238,10 @@ void bch2_io_error(struct bch_dev *, enum bch_member_error_type); _ret; \ }) +int bch2_inum_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum); +int bch2_inum_offset_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum, u64); + +void bch2_inum_err_msg(struct bch_fs *, struct printbuf *, subvol_inum); +void bch2_inum_offset_err_msg(struct bch_fs *, struct printbuf *, subvol_inum, u64); + #endif /* _BCACHEFS_ERROR_H */ diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c index 5f4fecb358da..6aac579a692a 100644 --- a/fs/bcachefs/extent_update.c +++ b/fs/bcachefs/extent_update.c @@ -64,7 +64,7 @@ static int count_iters_for_insert(struct btree_trans *trans, break; case KEY_TYPE_reflink_p: { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); - u64 idx = le64_to_cpu(p.v->idx); + u64 idx = REFLINK_P_IDX(p.v); unsigned sectors = bpos_min(*end, p.k->p).offset - bkey_start_offset(p.k); struct btree_iter iter; @@ -128,7 +128,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans, bch2_trans_copy_iter(©, iter); - for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) { + for_each_btree_key_max_continue_norestart(copy, insert->k.p, 0, k, ret) { unsigned offset = 0; if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k))) diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 37e3d69bec06..05d5f71a7ca9 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -21,6 +21,7 @@ #include "extents.h" #include "inode.h" #include "journal.h" +#include "rebalance.h" #include 
"replicas.h" #include "super.h" #include "super-io.h" @@ -88,6 +89,14 @@ static inline bool ptr_better(struct bch_fs *c, u64 l1 = dev_latency(c, p1.ptr.dev); u64 l2 = dev_latency(c, p2.ptr.dev); + /* + * Square the latencies, to bias more in favor of the faster + * device - we never want to stop issuing reads to the slower + * device altogether, so that we can update our latency numbers: + */ + l1 *= l1; + l2 *= l2; + /* Pick at random, biased in favor of the faster device: */ return bch2_rand_range(l1 + l2) > l1; @@ -169,7 +178,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k, /* KEY_TYPE_btree_ptr: */ int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -177,7 +186,7 @@ int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k, c, btree_ptr_val_too_big, "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -189,7 +198,7 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c, } int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); int ret = 0; @@ -203,12 +212,13 @@ int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k, c, btree_ptr_v2_min_key_bad, "min_key > key"); - if (flags & BCH_VALIDATE_write) + if ((from.flags & BCH_VALIDATE_write) && + c->sb.version_min >= bcachefs_metadata_version_btree_ptr_sectors_written) bkey_fsck_err_on(!bp.v->sectors_written, c, btree_ptr_v2_written_0, "sectors_written == 0"); - ret = bch2_bkey_ptrs_validate(c, k, flags); + ret = bch2_bkey_ptrs_validate(c, k, from); fsck_err: return ret; } @@ -395,7 +405,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) /* KEY_TYPE_reservation: */ int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); int ret = 0; @@ -1120,6 +1130,57 @@ void bch2_extent_crc_unpacked_to_text(struct printbuf *out, struct bch_extent_cr bch2_prt_compression_type(out, crc->compression_type); } +static void bch2_extent_rebalance_to_text(struct printbuf *out, struct bch_fs *c, + const struct bch_extent_rebalance *r) +{ + prt_str(out, "rebalance:"); + + prt_printf(out, " replicas=%u", r->data_replicas); + if (r->data_replicas_from_inode) + prt_str(out, " (inode)"); + + prt_str(out, " checksum="); + bch2_prt_csum_opt(out, r->data_checksum); + if (r->data_checksum_from_inode) + prt_str(out, " (inode)"); + + if (r->background_compression || r->background_compression_from_inode) { + prt_str(out, " background_compression="); + bch2_compression_opt_to_text(out, r->background_compression); + + if (r->background_compression_from_inode) + prt_str(out, " (inode)"); + } + + if (r->background_target || r->background_target_from_inode) { + prt_str(out, " background_target="); + if (c) + bch2_target_to_text(out, c, r->background_target); + else + prt_printf(out, "%u", r->background_target); + + if (r->background_target_from_inode) + prt_str(out, " (inode)"); + } + + if (r->promote_target || r->promote_target_from_inode) { + prt_str(out, " promote_target="); + if (c) + bch2_target_to_text(out, c, r->promote_target); + else + prt_printf(out, "%u", 
r->promote_target); + + if (r->promote_target_from_inode) + prt_str(out, " (inode)"); + } + + if (r->erasure_code || r->erasure_code_from_inode) { + prt_printf(out, " ec=%u", r->erasure_code); + if (r->erasure_code_from_inode) + prt_str(out, " (inode)"); + } +} + void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { @@ -1155,18 +1216,10 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, (u64) ec->idx, ec->block); break; } - case BCH_EXTENT_ENTRY_rebalance: { - const struct bch_extent_rebalance *r = &entry->rebalance; - - prt_str(out, "rebalance: target "); - if (c) - bch2_target_to_text(out, c, r->target); - else - prt_printf(out, "%u", r->target); - prt_str(out, " compression "); - bch2_compression_opt_to_text(out, r->compression); + case BCH_EXTENT_ENTRY_rebalance: + bch2_extent_rebalance_to_text(out, c, &entry->rebalance); break; - } + default: prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry)); return; @@ -1178,13 +1231,19 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c, static int extent_ptr_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags, + struct bkey_validate_context from, const struct bch_extent_ptr *ptr, unsigned size_ondisk, bool metadata) { int ret = 0; + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + bkey_for_each_ptr(ptrs, ptr2) + bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, + c, ptr_to_duplicate_device, + "multiple pointers to same device (%u)", ptr->dev); + /* bad pointers are repaired by check_fix_ptrs(): */ rcu_read_lock(); struct bch_dev *ca = bch2_dev_rcu_noerror(c, ptr->dev); @@ -1199,13 +1258,6 @@ static int extent_ptr_validate(struct bch_fs *c, unsigned bucket_size = ca->mi.bucket_size; rcu_read_unlock(); - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - bkey_for_each_ptr(ptrs, ptr2) - bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, - c, ptr_to_duplicate_device, - "multiple pointers to same device (%u)", ptr->dev); - - bkey_fsck_err_on(bucket >= nbuckets, c, ptr_after_last_bucket, "pointer past last bucket (%llu > %llu)", bucket, nbuckets); @@ -1221,7 +1273,7 @@ fsck_err: } int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); const union bch_extent_entry *entry; @@ -1248,7 +1300,7 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, switch (extent_entry_type(entry)) { case BCH_EXTENT_ENTRY_ptr: - ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false); + ret = extent_ptr_validate(c, k, from, &entry->ptr, size_ondisk, false); if (ret) return ret; @@ -1270,9 +1322,6 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, case BCH_EXTENT_ENTRY_crc128: crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); - bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, - c, ptr_crc_uncompressed_size_too_small, - "checksum offset + key size > uncompressed size"); bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, ptr_crc_csum_type_unknown, "invalid checksum type"); @@ -1280,6 +1329,19 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, c, ptr_crc_compression_type_unknown, "invalid compression type"); + bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, + c, ptr_crc_uncompressed_size_too_small, + "checksum offset + key size > uncompressed size"); + bkey_fsck_err_on(crc_is_encoded(crc) && + 
(crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && + (from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), + c, ptr_crc_uncompressed_size_too_big, + "too large encoded extent"); + bkey_fsck_err_on(!crc_is_compressed(crc) && + crc.compressed_size != crc.uncompressed_size, + c, ptr_crc_uncompressed_size_mismatch, + "not compressed but compressed != uncompressed size"); + if (bch2_csum_type_is_encryption(crc.csum_type)) { if (nonce == UINT_MAX) nonce = crc.offset + crc.nonce; @@ -1293,12 +1355,6 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k, "redundant crc entry"); crc_since_last_ptr = true; - bkey_fsck_err_on(crc_is_encoded(crc) && - (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && - (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), - c, ptr_crc_uncompressed_size_too_big, - "too large encoded extent"); - size_ondisk = crc.compressed_size; break; case BCH_EXTENT_ENTRY_stripe_ptr: @@ -1391,166 +1447,6 @@ void bch2_ptr_swab(struct bkey_s k) } } -const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry; - - bkey_extent_entry_for_each(ptrs, entry) - if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance) - return &entry->rebalance; - - return NULL; -} - -unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k, - unsigned target, unsigned compression) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned rewrite_ptrs = 0; - - if (compression) { - unsigned compression_type = bch2_compression_opt_to_type(compression); - const union bch_extent_entry *entry; - struct extent_ptr_decoded p; - unsigned i = 0; - - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || - p.ptr.unwritten) { - rewrite_ptrs = 0; - goto incompressible; - } - - if (!p.ptr.cached && p.crc.compression_type != compression_type) - rewrite_ptrs |= 1U << i; - i++; - } - } -incompressible: - if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { - unsigned i = 0; - - bkey_for_each_ptr(ptrs, ptr) { - if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target)) - rewrite_ptrs |= 1U << i; - i++; - } - } - - return rewrite_ptrs; -} - -bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k) -{ - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - /* - * If it's an indirect extent, we don't delete the rebalance entry when - * done so that we know what options were applied - check if it still - * needs work done: - */ - if (r && - k.k->type == KEY_TYPE_reflink_v && - !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression)) - r = NULL; - - return r != NULL; -} - -static u64 __bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k, - unsigned target, unsigned compression) -{ - struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - const union bch_extent_entry *entry; - struct extent_ptr_decoded p; - u64 sectors = 0; - - if (compression) { - unsigned compression_type = bch2_compression_opt_to_type(compression); - - bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { - if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || - p.ptr.unwritten) { - sectors = 0; - goto incompressible; - } - - if (!p.ptr.cached && p.crc.compression_type != compression_type) - sectors += p.crc.compressed_size; - } - } -incompressible: - if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) { - bkey_for_each_ptr_decode(k.k, ptrs, p, 
entry) - if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, target)) - sectors += p.crc.compressed_size; - } - - return sectors; -} - -u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) -{ - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - return r ? __bch2_bkey_sectors_need_rebalance(c, k, r->target, r->compression) : 0; -} - -int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k, - struct bch_io_opts *opts) -{ - struct bkey_s k = bkey_i_to_s(_k); - struct bch_extent_rebalance *r; - unsigned target = opts->background_target; - unsigned compression = background_compression(*opts); - bool needs_rebalance; - - if (!bkey_extent_is_direct_data(k.k)) - return 0; - - /* get existing rebalance entry: */ - r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c); - if (r) { - if (k.k->type == KEY_TYPE_reflink_v) { - /* - * indirect extents: existing options take precedence, - * so that we don't move extents back and forth if - * they're referenced by different inodes with different - * options: - */ - if (r->target) - target = r->target; - if (r->compression) - compression = r->compression; - } - - r->target = target; - r->compression = compression; - } - - needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression); - - if (needs_rebalance && !r) { - union bch_extent_entry *new = bkey_val_end(k); - - new->rebalance.type = 1U << BCH_EXTENT_ENTRY_rebalance; - new->rebalance.compression = compression; - new->rebalance.target = target; - new->rebalance.unused = 0; - k.k->u64s += extent_entry_u64s(new); - } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) { - /* - * For indirect extents, don't delete the rebalance entry when - * we're finished so that we know we specifically moved it or - * compressed it to its current location/compression type - */ - extent_entry_drop(k, (union bch_extent_entry *) r); - } - - return 0; -} - /* Generic extent code: */ int bch2_cut_front_s(struct bpos where, struct bkey_s k) @@ -1610,7 +1506,7 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k) case KEY_TYPE_reflink_p: { struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k); - le64_add_cpu(&p.v->idx, sub); + SET_REFLINK_P_IDX(p.v, REFLINK_P_IDX(p.v) + sub); break; } case KEY_TYPE_inline_data: diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index bcffcf60aaaf..620b284aa34f 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -8,7 +8,6 @@ struct bch_fs; struct btree_trans; -enum bch_validate_flags; /* extent entries: */ @@ -410,12 +409,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c, /* KEY_TYPE_btree_ptr: */ int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned, int, struct bkey_s); @@ -452,7 +451,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); /* KEY_TYPE_reservation: */ int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); bool bch2_reservation_merge(struct 
bch_fs *, struct bkey_s, struct bkey_s_c); @@ -696,7 +695,7 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1, struct bch_extent_ptr ptr2) @@ -710,15 +709,6 @@ static inline bool bch2_extent_ptr_eq(struct bch_extent_ptr ptr1, void bch2_ptr_swab(struct bkey_s); -const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c); -unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c, - unsigned, unsigned); -bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c); -u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c); - -int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *, - struct bch_io_opts *); - /* Generic extent code: */ enum bch_extent_overlap { diff --git a/fs/bcachefs/extents_format.h b/fs/bcachefs/extents_format.h index 3bd2fdbb0817..c198dfc376d6 100644 --- a/fs/bcachefs/extents_format.h +++ b/fs/bcachefs/extents_format.h @@ -201,19 +201,8 @@ struct bch_extent_stripe_ptr { #endif }; -struct bch_extent_rebalance { -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u64 type:6, - unused:34, - compression:8, /* enum bch_compression_opt */ - target:16; -#elif defined (__BIG_ENDIAN_BITFIELD) - __u64 target:16, - compression:8, - unused:34, - type:6; -#endif -}; +/* bch_extent_rebalance: */ +#include "rebalance_format.h" union bch_extent_entry { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64 diff --git a/fs/bcachefs/fs-common.c b/fs/bcachefs/fs-common.c index 7e10a9ddcfd9..d70d9f634cea 100644 --- a/fs/bcachefs/fs-common.c +++ b/fs/bcachefs/fs-common.c @@ -69,9 +69,7 @@ int bch2_create_trans(struct btree_trans *trans, if (!snapshot_src.inum) { /* Inode wasn't specified, just snapshot: */ struct bch_subvolume s; - - ret = bch2_subvolume_get(trans, snapshot_src.subvol, true, - BTREE_ITER_cached, &s); + ret = bch2_subvolume_get(trans, snapshot_src.subvol, true, &s); if (ret) goto err; @@ -154,6 +152,7 @@ int bch2_create_trans(struct btree_trans *trans, if (is_subdir_for_nlink(new_inode)) dir_u->bi_nlink++; dir_u->bi_mtime = dir_u->bi_ctime = now; + dir_u->bi_size += dirent_occupied_size(name); ret = bch2_inode_write(trans, &dir_iter, dir_u); if (ret) @@ -172,6 +171,10 @@ int bch2_create_trans(struct btree_trans *trans, new_inode->bi_dir_offset = dir_offset; } + if (S_ISDIR(mode) && + !new_inode->bi_subvol) + new_inode->bi_depth = dir_u->bi_depth + 1; + inode_iter.flags &= ~BTREE_ITER_all_snapshots; bch2_btree_iter_set_snapshot(&inode_iter, snapshot); @@ -218,6 +221,7 @@ int bch2_link_trans(struct btree_trans *trans, } dir_u->bi_mtime = dir_u->bi_ctime = now; + dir_u->bi_size += dirent_occupied_size(name); dir_hash = bch2_hash_info_init(c, dir_u); @@ -320,6 +324,7 @@ int bch2_unlink_trans(struct btree_trans *trans, dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now; dir_u->bi_nlink -= is_subdir_for_nlink(inode_u); + dir_u->bi_size -= dirent_occupied_size(name); ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc, &dir_hash, &dirent_iter, @@ -458,6 +463,14 @@ int bch2_rename_trans(struct btree_trans *trans, goto err; } + if (mode == BCH_RENAME) { + src_dir_u->bi_size -= dirent_occupied_size(src_name); + dst_dir_u->bi_size += dirent_occupied_size(dst_name); + } + + if (mode == BCH_RENAME_OVERWRITE) + 
src_dir_u->bi_size -= dirent_occupied_size(src_name); + if (src_inode_u->bi_parent_subvol) src_inode_u->bi_parent_subvol = dst_dir.subvol; @@ -512,6 +525,15 @@ int bch2_rename_trans(struct btree_trans *trans, dst_dir_u->bi_nlink++; } + if (S_ISDIR(src_inode_u->bi_mode) && + !src_inode_u->bi_subvol) + src_inode_u->bi_depth = dst_dir_u->bi_depth + 1; + + if (mode == BCH_RENAME_EXCHANGE && + S_ISDIR(dst_inode_u->bi_mode) && + !dst_inode_u->bi_subvol) + dst_inode_u->bi_depth = src_dir_u->bi_depth + 1; + if (dst_inum.inum && is_subdir_for_nlink(dst_inode_u)) { dst_dir_u->bi_nlink--; src_dir_u->bi_nlink += mode == BCH_RENAME_EXCHANGE; @@ -548,3 +570,94 @@ err: bch2_trans_iter_exit(trans, &src_dir_iter); return ret; } + +static inline void prt_bytes_reversed(struct printbuf *out, const void *b, unsigned n) +{ + bch2_printbuf_make_room(out, n); + + unsigned can_print = min(n, printbuf_remaining(out)); + + b += n; + + for (unsigned i = 0; i < can_print; i++) + out->buf[out->pos++] = *((char *) --b); + + printbuf_nul_terminate(out); +} + +static inline void prt_str_reversed(struct printbuf *out, const char *s) +{ + prt_bytes_reversed(out, s, strlen(s)); +} + +static inline void reverse_bytes(void *b, size_t n) +{ + char *e = b + n, *s = b; + + while (s < e) { + --e; + swap(*s, *e); + s++; + } +} + +/* XXX: we don't yet attempt to print paths when we don't know the subvol */ +int bch2_inum_to_path(struct btree_trans *trans, subvol_inum inum, struct printbuf *path) +{ + unsigned orig_pos = path->pos; + int ret = 0; + + while (!(inum.subvol == BCACHEFS_ROOT_SUBVOL && + inum.inum == BCACHEFS_ROOT_INO)) { + struct bch_inode_unpacked inode; + ret = bch2_inode_find_by_inum_trans(trans, inum, &inode); + if (ret) + goto disconnected; + + if (!inode.bi_dir && !inode.bi_dir_offset) { + ret = -BCH_ERR_ENOENT_inode_no_backpointer; + goto disconnected; + } + + inum.subvol = inode.bi_parent_subvol ?: inum.subvol; + inum.inum = inode.bi_dir; + + u32 snapshot; + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); + if (ret) + goto disconnected; + + struct btree_iter d_iter; + struct bkey_s_c_dirent d = bch2_bkey_get_iter_typed(trans, &d_iter, + BTREE_ID_dirents, SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot), + 0, dirent); + ret = bkey_err(d.s_c); + if (ret) + goto disconnected; + + struct qstr dirent_name = bch2_dirent_get_name(d); + prt_bytes_reversed(path, dirent_name.name, dirent_name.len); + + prt_char(path, '/'); + + bch2_trans_iter_exit(trans, &d_iter); + } + + if (orig_pos == path->pos) + prt_char(path, '/'); +out: + ret = path->allocation_failure ? 
-ENOMEM : 0; + if (ret) + goto err; + + reverse_bytes(path->buf + orig_pos, path->pos - orig_pos); + return 0; +err: + return ret; +disconnected: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + goto err; + + prt_str_reversed(path, "(disconnected)"); + goto out; +} diff --git a/fs/bcachefs/fs-common.h b/fs/bcachefs/fs-common.h index c934e807b380..2b59210bb5e8 100644 --- a/fs/bcachefs/fs-common.h +++ b/fs/bcachefs/fs-common.h @@ -42,4 +42,6 @@ int bch2_rename_trans(struct btree_trans *, bool bch2_reinherit_attrs(struct bch_inode_unpacked *, struct bch_inode_unpacked *); +int bch2_inum_to_path(struct btree_trans *, subvol_inum, struct printbuf *); + #endif /* _BCACHEFS_FS_COMMON_H */ diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c index 95972809e76d..ab1d5db2fa56 100644 --- a/fs/bcachefs/fs-io-buffered.c +++ b/fs/bcachefs/fs-io-buffered.c @@ -164,7 +164,8 @@ static void bchfs_read(struct btree_trans *trans, BTREE_ITER_slots); while (1) { struct bkey_s_c k; - unsigned bytes, sectors, offset_into_extent; + unsigned bytes, sectors; + s64 offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; bch2_trans_begin(trans); @@ -197,7 +198,7 @@ static void bchfs_read(struct btree_trans *trans, k = bkey_i_to_s_c(sk.k); - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); if (readpages_iter) { ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors, @@ -230,10 +231,12 @@ err: bch2_trans_iter_exit(trans, &iter); if (ret) { - bch_err_inum_offset_ratelimited(c, - iter.pos.inode, - iter.pos.offset << 9, - "read error %i from btree lookup", ret); + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9); + prt_printf(&buf, "read error %i from btree lookup", ret); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + rbio->bio.bi_status = BLK_STS_IOERR; bio_endio(&rbio->bio); } @@ -248,6 +251,7 @@ void bch2_readahead(struct readahead_control *ractl) struct bch_io_opts opts; struct folio *folio; struct readpages_iter readpages_iter; + struct blk_plug plug; bch2_inode_opts_get(&opts, c, &inode->ei_inode); @@ -255,6 +259,16 @@ void bch2_readahead(struct readahead_control *ractl) if (ret) return; + /* + * Besides being a general performance optimization, plugging helps with + * avoiding btree transaction srcu warnings - submitting a bio can + * block, and we don't want todo that with the transaction locked. + * + * However, plugged bios are submitted when we schedule; we ideally + * would have our own scheduler hook to call unlock_long() before + * scheduling. 
+ */ + blk_start_plug(&plug); bch2_pagecache_add_get(inode); struct btree_trans *trans = bch2_trans_get(c); @@ -281,7 +295,7 @@ void bch2_readahead(struct readahead_control *ractl) bch2_trans_put(trans); bch2_pagecache_add_put(inode); - + blk_finish_plug(&plug); darray_exit(&readpages_iter.folios); } @@ -296,9 +310,13 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping) struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_read_bio *rbio; struct bch_io_opts opts; + struct blk_plug plug; int ret; DECLARE_COMPLETION_ONSTACK(done); + BUG_ON(folio_test_uptodate(folio)); + BUG_ON(folio_test_dirty(folio)); + if (!bch2_folio_create(folio, GFP_KERNEL)) return -ENOMEM; @@ -313,7 +331,9 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping) rbio->bio.bi_iter.bi_sector = folio_sector(folio); BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0)); + blk_start_plug(&plug); bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0)); + blk_finish_plug(&plug); wait_for_completion(&done); ret = blk_status_to_errno(rbio->bio.bi_status); @@ -605,15 +625,6 @@ do_io: BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio, sectors << 9, offset << 9)); - /* Check for writing past i_size: */ - WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) > - round_up(i_size, block_bytes(c)) && - !test_bit(BCH_FS_emergency_ro, &c->flags), - "writing past i_size: %llu > %llu (unrounded %llu)\n", - bio_end_sector(&w->io->op.wbio.bio) << 9, - round_up(i_size, block_bytes(c)), - i_size); - w->io->op.res.sectors += reserved_sectors; w->io->op.i_sectors_delta -= dirty_sectors; w->io->op.new_i_size = i_size; @@ -669,7 +680,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping, folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, FGP_WRITEBEGIN | fgf_set_order(len), mapping_gfp_mask(mapping)); - if (IS_ERR_OR_NULL(folio)) + if (IS_ERR(folio)) goto err_unlock; offset = pos - folio_pos(folio); diff --git a/fs/bcachefs/fs-io-direct.c b/fs/bcachefs/fs-io-direct.c index 6d3a05ae5da8..2089c36b5866 100644 --- a/fs/bcachefs/fs-io-direct.c +++ b/fs/bcachefs/fs-io-direct.c @@ -70,6 +70,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) struct bch_io_opts opts; struct dio_read *dio; struct bio *bio; + struct blk_plug plug; loff_t offset = req->ki_pos; bool sync = is_sync_kiocb(req); size_t shorten; @@ -128,6 +129,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter) */ dio->should_dirty = iter_is_iovec(iter); + blk_start_plug(&plug); + goto start; while (iter->count) { bio = bio_alloc_bioset(NULL, @@ -160,6 +163,8 @@ start: bch2_read(c, rbio_init(bio, opts), inode_inum(inode)); } + blk_finish_plug(&plug); + iter->count += shorten; if (sync) { diff --git a/fs/bcachefs/fs-io-pagecache.c b/fs/bcachefs/fs-io-pagecache.c index 1d4910ea0f1d..e072900e6a5b 100644 --- a/fs/bcachefs/fs-io-pagecache.c +++ b/fs/bcachefs/fs-io-pagecache.c @@ -29,7 +29,7 @@ int bch2_filemap_get_contig_folios_d(struct address_space *mapping, break; f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp); - if (IS_ERR_OR_NULL(f)) + if (IS_ERR(f)) break; BUG_ON(fs->nr && folio_pos(f) != pos); @@ -199,7 +199,7 @@ int bch2_folio_set(struct bch_fs *c, subvol_inum inum, unsigned folio_idx = 0; return bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inum.inum, offset), POS(inum.inum, U64_MAX), inum.subvol, 
BTREE_ITER_slots, k, ({ diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index 2456c41b215e..94bf34b9b65f 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -167,6 +167,34 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode, /* fsync: */ +static int bch2_get_inode_journal_seq_trans(struct btree_trans *trans, subvol_inum inum, + u64 *seq) +{ + struct printbuf buf = PRINTBUF; + struct bch_inode_unpacked u; + struct btree_iter iter; + int ret = bch2_inode_peek(trans, &iter, &u, inum, 0); + if (ret) + return ret; + + u64 cur_seq = journal_cur_seq(&trans->c->journal); + *seq = min(cur_seq, u.bi_journal_seq); + + if (fsck_err_on(u.bi_journal_seq > cur_seq, + trans, inode_journal_seq_in_future, + "inode journal seq in future (currently at %llu)\n%s", + cur_seq, + (bch2_inode_unpacked_to_text(&buf, &u), + buf.buf))) { + u.bi_journal_seq = cur_seq; + ret = bch2_inode_write(trans, &iter, &u); + } +fsck_err: + bch2_trans_iter_exit(trans, &iter); + printbuf_exit(&buf); + return ret; +} + /* * inode->ei_inode.bi_journal_seq won't be up to date since it's set in an * insert trigger: look up the btree inode instead @@ -180,9 +208,10 @@ static int bch2_flush_inode(struct bch_fs *c, if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) return -EROFS; - struct bch_inode_unpacked u; - int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?: - bch2_journal_flush_seq(&c->journal, u.bi_journal_seq, TASK_INTERRUPTIBLE) ?: + u64 seq; + int ret = bch2_trans_commit_do(c, NULL, NULL, 0, + bch2_get_inode_journal_seq_trans(trans, inode_inum(inode), &seq)) ?: + bch2_journal_flush_seq(&c->journal, seq, TASK_INTERRUPTIBLE) ?: bch2_inode_flush_nocow_writes(c, inode); bch2_write_ref_put(c, BCH_WRITE_REF_fsync); return ret; @@ -222,7 +251,7 @@ static inline int range_has_data(struct bch_fs *c, u32 subvol, struct bpos end) { return bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, start, end, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, start, end, subvol, 0, k, ({ bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k); }))); @@ -256,7 +285,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode, folio = __filemap_get_folio(mapping, index, FGP_LOCK|FGP_CREAT, GFP_KERNEL); - if (IS_ERR_OR_NULL(folio)) { + if (IS_ERR(folio)) { ret = -ENOMEM; goto out; } @@ -806,7 +835,7 @@ static int quota_reserve_range(struct bch_inode_info *inode, u64 sectors = end - start; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, start), POS(inode->v.i_ino, end - 1), @@ -877,11 +906,18 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, bch2_mark_pagecache_unallocated(src, pos_src >> 9, (pos_src + aligned_len) >> 9); + /* + * XXX: we'd like to be telling bch2_remap_range() if we have + * permission to write to the source file, and thus if io path option + * changes should be propagated through the copy, but we need mnt_idmap + * from the pathwalk, awkward + */ ret = bch2_remap_range(c, inode_inum(dst), pos_dst >> 9, inode_inum(src), pos_src >> 9, aligned_len >> 9, - pos_dst + len, &i_sectors_delta); + pos_dst + len, &i_sectors_delta, + false); if (ret < 0) goto err; @@ -922,7 +958,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset) return -ENXIO; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + 
for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, offset >> 9), POS(inode->v.i_ino, U64_MAX), inum.subvol, 0, k, ({ @@ -958,7 +994,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset) return -ENXIO; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_extents, POS(inode->v.i_ino, offset >> 9), POS(inode->v.i_ino, U64_MAX), inum.subvol, BTREE_ITER_slots, k, ({ diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c index 405cf08bda34..15725b4ce393 100644 --- a/fs/bcachefs/fs-ioctl.c +++ b/fs/bcachefs/fs-ioctl.c @@ -406,7 +406,7 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp, sync_inodes_sb(c->vfs_sb); up_read(&c->vfs_sb->s_umount); } -retry: + if (arg.src_ptr) { error = user_path_at(arg.dirfd, (const char __user *)(unsigned long)arg.src_ptr, @@ -486,11 +486,6 @@ err3: err2: if (arg.src_ptr) path_put(&src_path); - - if (retry_estale(error, lookup_flags)) { - lookup_flags |= LOOKUP_REVAL; - goto retry; - } err1: return error; } diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index a41d0d8a2f7b..90ade8f648d9 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -23,6 +23,7 @@ #include "journal.h" #include "keylist.h" #include "quota.h" +#include "rebalance.h" #include "snapshot.h" #include "super.h" #include "xattr.h" @@ -38,6 +39,7 @@ #include <linux/posix_acl.h> #include <linux/random.h> #include <linux/seq_file.h> +#include <linux/siphash.h> #include <linux/statfs.h> #include <linux/string.h> #include <linux/xattr.h> @@ -65,6 +67,9 @@ void bch2_inode_update_after_write(struct btree_trans *trans, i_gid_write(&inode->v, bi->bi_gid); inode->v.i_mode = bi->bi_mode; + if (fields & ATTR_SIZE) + i_size_write(&inode->v, bi->bi_size); + if (fields & ATTR_ATIME) inode_set_atime_to_ts(&inode->v, bch2_time_to_timespec(c, bi->bi_atime)); if (fields & ATTR_MTIME) @@ -89,10 +94,25 @@ int __must_check bch2_write_inode(struct bch_fs *c, retry: bch2_trans_begin(trans); - ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), - BTREE_ITER_intent) ?: - (set ? set(trans, inode, &inode_u, p) : 0) ?: - bch2_inode_write(trans, &iter, &inode_u) ?: + ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), BTREE_ITER_intent); + if (ret) + goto err; + + struct bch_extent_rebalance old_r = bch2_inode_rebalance_opts_get(c, &inode_u); + + ret = (set ? 
set(trans, inode, &inode_u, p) : 0); + if (ret) + goto err; + + struct bch_extent_rebalance new_r = bch2_inode_rebalance_opts_get(c, &inode_u); + + if (memcmp(&old_r, &new_r, sizeof(new_r))) { + ret = bch2_set_rebalance_needs_scan_trans(trans, inode_u.bi_inum); + if (ret) + goto err; + } + + ret = bch2_inode_write(trans, &iter, &inode_u) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); /* @@ -101,7 +121,7 @@ retry: */ if (!ret) bch2_inode_update_after_write(trans, inode, &inode_u, fields); - +err: bch2_trans_iter_exit(trans, &iter); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) @@ -160,8 +180,9 @@ static bool subvol_inum_eq(subvol_inum a, subvol_inum b) static u32 bch2_vfs_inode_hash_fn(const void *data, u32 len, u32 seed) { const subvol_inum *inum = data; + siphash_key_t k = { .key[0] = seed }; - return jhash(&inum->inum, sizeof(inum->inum), seed); + return siphash_2u64(inum->subvol, inum->inum, &k); } static u32 bch2_vfs_inode_obj_hash_fn(const void *data, u32 len, u32 seed) @@ -190,11 +211,18 @@ static const struct rhashtable_params bch2_vfs_inodes_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params bch2_vfs_inodes_by_inum_params = { + .head_offset = offsetof(struct bch_inode_info, by_inum_hash), + .key_offset = offsetof(struct bch_inode_info, ei_inum.inum), + .key_len = sizeof(u64), + .automatic_shrinking = true, +}; + int bch2_inode_or_descendents_is_open(struct btree_trans *trans, struct bpos p) { struct bch_fs *c = trans->c; - struct rhashtable *ht = &c->vfs_inodes_table; - subvol_inum inum = (subvol_inum) { .inum = p.offset }; + struct rhltable *ht = &c->vfs_inodes_by_inum_table; + u64 inum = p.offset; DARRAY(u32) subvols; int ret = 0; @@ -219,15 +247,15 @@ restart_from_top: struct rhash_lock_head __rcu *const *bkt; struct rhash_head *he; unsigned int hash; - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + struct bucket_table *tbl = rht_dereference_rcu(ht->ht.tbl, &ht->ht); restart: - hash = rht_key_hashfn(ht, tbl, &inum, bch2_vfs_inodes_params); + hash = rht_key_hashfn(&ht->ht, tbl, &inum, bch2_vfs_inodes_by_inum_params); bkt = rht_bucket(tbl, hash); do { struct bch_inode_info *inode; rht_for_each_entry_rcu_from(inode, he, rht_ptr_rcu(bkt), tbl, hash, hash) { - if (inode->ei_inum.inum == inum.inum) { + if (inode->ei_inum.inum == inum) { ret = darray_push_gfp(&subvols, inode->ei_inum.subvol, GFP_NOWAIT|__GFP_NOWARN); if (ret) { @@ -248,7 +276,7 @@ restart: /* Ensure we see any new tables. 
*/ smp_rmb(); - tbl = rht_dereference_rcu(tbl->future_tbl, ht); + tbl = rht_dereference_rcu(tbl->future_tbl, &ht->ht); if (unlikely(tbl)) goto restart; rcu_read_unlock(); @@ -327,7 +355,11 @@ static void bch2_inode_hash_remove(struct bch_fs *c, struct bch_inode_info *inod spin_unlock(&inode->v.i_lock); if (remove) { - int ret = rhashtable_remove_fast(&c->vfs_inodes_table, + int ret = rhltable_remove(&c->vfs_inodes_by_inum_table, + &inode->by_inum_hash, bch2_vfs_inodes_by_inum_params); + BUG_ON(ret); + + ret = rhashtable_remove_fast(&c->vfs_inodes_table, &inode->hash, bch2_vfs_inodes_params); BUG_ON(ret); inode->v.i_hash.pprev = NULL; @@ -372,6 +404,11 @@ retry: discard_new_inode(&inode->v); return old; } else { + int ret = rhltable_insert(&c->vfs_inodes_by_inum_table, + &inode->by_inum_hash, + bch2_vfs_inodes_by_inum_params); + BUG_ON(ret); + inode_fake_hash(&inode->v); inode_sb_list_add(&inode->v); @@ -465,7 +502,7 @@ struct inode *bch2_vfs_inode_get(struct bch_fs *c, subvol_inum inum) struct bch_inode_unpacked inode_u; struct bch_subvolume subvol; int ret = lockrestart_do(trans, - bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?: + bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: bch2_inode_find_by_inum_trans(trans, inum, &inode_u)) ?: PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol)); bch2_trans_put(trans); @@ -535,8 +572,7 @@ retry: inum.subvol = inode_u.bi_subvol ?: dir->ei_inum.subvol; inum.inum = inode_u.bi_inum; - ret = bch2_subvolume_get(trans, inum.subvol, true, - BTREE_ITER_with_updates, &subvol) ?: + ret = bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: bch2_trans_commit(trans, NULL, &journal_seq, 0); if (unlikely(ret)) { bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, @@ -549,7 +585,7 @@ err_before_quota: if (!(flags & BCH_CREATE_TMPFILE)) { bch2_inode_update_after_write(trans, dir, &dir_u, - ATTR_MTIME|ATTR_CTIME); + ATTR_MTIME|ATTR_CTIME|ATTR_SIZE); mutex_unlock(&dir->ei_update_lock); } @@ -617,7 +653,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans, struct bch_subvolume subvol; struct bch_inode_unpacked inode_u; - ret = bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?: + ret = bch2_subvolume_get(trans, inum.subvol, true, &subvol) ?: bch2_inode_find_by_inum_nowarn_trans(trans, inum, &inode_u) ?: PTR_ERR_OR_ZERO(inode = bch2_inode_hash_init_insert(trans, inum, &inode_u, &subvol)); @@ -628,7 +664,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans, goto err; /* regular files may have hardlinks: */ - if (bch2_fs_inconsistent_on(bch2_inode_should_have_bp(&inode_u) && + if (bch2_fs_inconsistent_on(bch2_inode_should_have_single_bp(&inode_u) && !bkey_eq(k.k->p, POS(inode_u.bi_dir, inode_u.bi_dir_offset)), c, "dirent points to inode that does not point back:\n %s", @@ -706,7 +742,7 @@ static int __bch2_link(struct bch_fs *c, if (likely(!ret)) { bch2_inode_update_after_write(trans, dir, &dir_u, - ATTR_MTIME|ATTR_CTIME); + ATTR_MTIME|ATTR_CTIME|ATTR_SIZE); bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME); } @@ -759,7 +795,7 @@ int __bch2_unlink(struct inode *vdir, struct dentry *dentry, goto err; bch2_inode_update_after_write(trans, dir, &dir_u, - ATTR_MTIME|ATTR_CTIME); + ATTR_MTIME|ATTR_CTIME|ATTR_SIZE); bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_MTIME); @@ -937,11 +973,11 @@ err_tx_restart: dst_inode->v.i_ino != dst_inode_u.bi_inum); bch2_inode_update_after_write(trans, src_dir, &src_dir_u, - ATTR_MTIME|ATTR_CTIME); + 
ATTR_MTIME|ATTR_CTIME|ATTR_SIZE); if (src_dir != dst_dir) bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u, - ATTR_MTIME|ATTR_CTIME); + ATTR_MTIME|ATTR_CTIME|ATTR_SIZE); bch2_inode_update_after_write(trans, src_inode, &src_inode_u, ATTR_CTIME); @@ -1245,7 +1281,6 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, struct btree_iter iter; struct bkey_s_c k; struct bkey_buf cur, prev; - unsigned offset_into_extent, sectors; bool have_extent = false; int ret = 0; @@ -1278,7 +1313,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_upto(&iter, end); + k = bch2_btree_iter_peek_max(&iter, end); ret = bkey_err(k); if (ret) continue; @@ -1292,9 +1327,8 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, continue; } - offset_into_extent = iter.pos.offset - - bkey_start_offset(k.k); - sectors = k.k->size - offset_into_extent; + s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); + unsigned sectors = k.k->size - offset_into_extent; bch2_bkey_buf_reassemble(&cur, c, k); @@ -1306,7 +1340,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info, k = bkey_i_to_s_c(cur.k); bch2_bkey_buf_realloc(&prev, c, k.k->u64s); - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); bch2_cut_front(POS(k.k->p.inode, bkey_start_offset(k.k) + @@ -1736,7 +1770,6 @@ static void bch2_vfs_inode_init(struct btree_trans *trans, bch2_inode_update_after_write(trans, inode, bi, ~0); inode->v.i_blocks = bi->bi_sectors; - inode->v.i_ino = bi->bi_inum; inode->v.i_rdev = bi->bi_dev; inode->v.i_generation = bi->bi_generation; inode->v.i_size = bi->bi_size; @@ -2200,7 +2233,8 @@ got_sb: sb->s_time_gran = c->sb.nsec_per_time_unit; sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1; sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec); - sb->s_uuid = c->sb.user_uuid; + super_set_uuid(sb, c->sb.user_uuid.b, sizeof(c->sb.user_uuid)); + super_set_sysfs_name_uuid(sb); sb->s_shrink->seeks = 0; c->vfs_sb = sb; strscpy(sb->s_id, c->name, sizeof(sb->s_id)); @@ -2345,13 +2379,16 @@ static int bch2_init_fs_context(struct fs_context *fc) void bch2_fs_vfs_exit(struct bch_fs *c) { + if (c->vfs_inodes_by_inum_table.ht.tbl) + rhltable_destroy(&c->vfs_inodes_by_inum_table); if (c->vfs_inodes_table.tbl) rhashtable_destroy(&c->vfs_inodes_table); } int bch2_fs_vfs_init(struct bch_fs *c) { - return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params); + return rhashtable_init(&c->vfs_inodes_table, &bch2_vfs_inodes_params) ?: + rhltable_init(&c->vfs_inodes_by_inum_table, &bch2_vfs_inodes_by_inum_params); } static struct file_system_type bcache_fs_type = { diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h index 59f9f7ae728d..dd2198541455 100644 --- a/fs/bcachefs/fs.h +++ b/fs/bcachefs/fs.h @@ -14,6 +14,7 @@ struct bch_inode_info { struct inode v; struct rhash_head hash; + struct rhlist_head by_inum_hash; subvol_inum ei_inum; struct list_head ei_vfs_inode_list; diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c index 75c8a97a6954..53a421ff136d 100644 --- a/fs/bcachefs/fsck.c +++ b/fs/bcachefs/fsck.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "bcachefs_ioctl.h" #include "bkey_buf.h" #include "btree_cache.h" #include "btree_update.h" @@ -16,6 +17,7 @@ #include "recovery_passes.h" #include "snapshot.h" #include 
"super.h" +#include "thread_with_file.h" #include "xattr.h" #include <linux/bsearch.h> @@ -73,7 +75,7 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum, { u64 sectors = 0; - int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents, + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ @@ -90,7 +92,7 @@ static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum, { u64 subdirs = 0; - int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents, + int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents, SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({ @@ -107,7 +109,7 @@ static int subvol_lookup(struct btree_trans *trans, u32 subvol, u32 *snapshot, u64 *inum) { struct bch_subvolume s; - int ret = bch2_subvolume_get(trans, subvol, false, 0, &s); + int ret = bch2_subvolume_get(trans, subvol, false, &s); *snapshot = le32_to_cpu(s.snapshot); *inum = le64_to_cpu(s.inode); @@ -170,7 +172,7 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans, if (ret) return ret; - struct bkey_s_c_dirent d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter)); + struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); *target = le64_to_cpu(d.v->d_inum); *type = d.v->d_type; bch2_trans_iter_exit(trans, &iter); @@ -203,6 +205,36 @@ err: return ret; } +/* + * Find any subvolume associated with a tree of snapshots + * We can't rely on master_subvol - it might have been deleted. + */ +static int find_snapshot_tree_subvol(struct btree_trans *trans, + u32 tree_id, u32 *subvol) +{ + struct btree_iter iter; + struct bkey_s_c k; + int ret; + + for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, ret) { + if (k.k->type != KEY_TYPE_snapshot) + continue; + + struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); + if (le32_to_cpu(s.v->tree) != tree_id) + continue; + + if (s.v->subvol) { + *subvol = le32_to_cpu(s.v->subvol); + goto found; + } + } + ret = -BCH_ERR_ENOENT_no_snapshot_tree_subvol; +found: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + /* Get lost+found, create if it doesn't exist: */ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, struct bch_inode_unpacked *lostfound, @@ -210,6 +242,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, { struct bch_fs *c = trans->c; struct qstr lostfound_str = QSTR("lost+found"); + struct btree_iter lostfound_iter = { NULL }; u64 inum = 0; unsigned d_type = 0; int ret; @@ -220,20 +253,24 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, if (ret) return ret; - subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) }; + u32 subvolid; + ret = find_snapshot_tree_subvol(trans, + bch2_snapshot_tree(c, snapshot), &subvolid); + bch_err_msg(c, ret, "finding subvol associated with snapshot tree %u", + bch2_snapshot_tree(c, snapshot)); + if (ret) + return ret; struct bch_subvolume subvol; - ret = bch2_subvolume_get(trans, le32_to_cpu(st.master_subvol), - false, 0, &subvol); - bch_err_msg(c, ret, "looking up root subvol %u for snapshot %u", - le32_to_cpu(st.master_subvol), snapshot); + ret = bch2_subvolume_get(trans, subvolid, false, &subvol); + bch_err_msg(c, ret, "looking up subvol %u for snapshot %u", subvolid, snapshot); if (ret) return ret; if (!subvol.inode) { struct btree_iter iter; struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter, - BTREE_ID_subvolumes, POS(0, le32_to_cpu(st.master_subvol)), + BTREE_ID_subvolumes, POS(0, subvolid), 0, 
subvolume); ret = PTR_ERR_OR_ZERO(subvol); if (ret) @@ -243,13 +280,16 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot, bch2_trans_iter_exit(trans, &iter); } - root_inum.inum = le64_to_cpu(subvol.inode); + subvol_inum root_inum = { + .subvol = subvolid, + .inum = le64_to_cpu(subvol.inode) + }; struct bch_inode_unpacked root_inode; struct bch_hash_info root_hash_info; ret = lookup_inode(trans, root_inum.inum, snapshot, &root_inode); bch_err_msg(c, ret, "looking up root inode %llu for subvol %u", - root_inum.inum, le32_to_cpu(st.master_subvol)); + root_inum.inum, subvolid); if (ret) return ret; @@ -288,11 +328,16 @@ create_lostfound: * XXX: we could have a nicer log message here if we had a nice way to * walk backpointers to print a path */ - bch_notice(c, "creating lost+found in subvol %llu snapshot %u", - root_inum.subvol, le32_to_cpu(st.root_snapshot)); + struct printbuf path = PRINTBUF; + ret = bch2_inum_to_path(trans, root_inum, &path); + if (ret) + goto err; + + bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u", + path.buf, root_inum.subvol, snapshot); + printbuf_exit(&path); u64 now = bch2_current_time(c); - struct btree_iter lostfound_iter = { NULL }; u64 cpu = raw_smp_processor_id(); bch2_inode_init_early(c, lostfound); @@ -405,7 +450,7 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * return ret; struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound); - struct qstr name = (struct qstr) QSTR(name_buf); + struct qstr name = QSTR(name_buf); inode->bi_dir = lostfound.bi_inum; @@ -451,7 +496,9 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * continue; struct bch_inode_unpacked child_inode; - bch2_inode_unpack(k, &child_inode); + ret = bch2_inode_unpack(k, &child_inode); + if (ret) + break; if (!inode_should_reattach(&child_inode)) { ret = maybe_delete_dirent(trans, @@ -482,6 +529,13 @@ static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked * return ret; } +static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans, + struct btree_iter *iter, + struct bpos pos) +{ + return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent); +} + static int remove_backpointer(struct btree_trans *trans, struct bch_inode_unpacked *inode) { @@ -490,13 +544,11 @@ static int remove_backpointer(struct btree_trans *trans, struct bch_fs *c = trans->c; struct btree_iter iter; - struct bkey_s_c_dirent d = - bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents, - SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot), 0, - dirent); - int ret = bkey_err(d) ?: - dirent_points_to_inode(c, d, inode) ?: - __remove_dirent(trans, d.k->p); + struct bkey_s_c_dirent d = dirent_get_by_pos(trans, &iter, + SPOS(inode->bi_dir, inode->bi_dir_offset, inode->bi_snapshot)); + int ret = bkey_err(d) ?: + dirent_points_to_inode(c, d, inode) ?: + __remove_dirent(trans, d.k->p); bch2_trans_iter_exit(trans, &iter); return ret; } @@ -613,7 +665,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 struct btree_iter iter = {}; bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0); - struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter); + struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0)); bch2_trans_iter_exit(trans, &iter); int ret = bkey_err(k); if (ret) @@ -780,11 +832,13 @@ struct inode_walker { struct bpos last_pos; DARRAY(struct inode_walker_entry) inodes; + 
snapshot_id_list deletes; }; static void inode_walker_exit(struct inode_walker *w) { darray_exit(&w->inodes); + darray_exit(&w->deletes); } static struct inode_walker inode_walker_init(void) @@ -797,9 +851,8 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w, { struct bch_inode_unpacked u; - BUG_ON(bch2_inode_unpack(inode, &u)); - - return darray_push(&w->inodes, ((struct inode_walker_entry) { + return bch2_inode_unpack(inode, &u) ?: + darray_push(&w->inodes, ((struct inode_walker_entry) { .inode = u, .snapshot = inode.k->p.snapshot, })); @@ -909,8 +962,9 @@ static int get_visible_inodes(struct btree_trans *trans, int ret; w->inodes.nr = 0; + w->deletes.nr = 0; - for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum), + for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot), BTREE_ITER_all_snapshots, k, ret) { if (k.k->p.offset != inum) break; @@ -918,10 +972,13 @@ static int get_visible_inodes(struct btree_trans *trans, if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) continue; - if (bkey_is_inode(k.k)) - add_inode(c, w, k); + if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot)) + continue; - if (k.k->p.snapshot >= s->pos.snapshot) + ret = bkey_is_inode(k.k) + ? add_inode(c, w, k) + : snapshot_list_add(c, &w->deletes, k.k->p.snapshot); + if (ret) break; } bch2_trans_iter_exit(trans, &iter); @@ -929,69 +986,16 @@ static int get_visible_inodes(struct btree_trans *trans, return ret; } -static int dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dirent d) -{ - if (d.v->d_type == DT_SUBVOL) { - u32 snap; - u64 inum; - int ret = subvol_lookup(trans, le32_to_cpu(d.v->d_child_subvol), &snap, &inum); - if (ret && !bch2_err_matches(ret, ENOENT)) - return ret; - return !ret; - } else { - struct btree_iter iter; - struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, - SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0); - int ret = bkey_err(k); - if (ret) - return ret; - - ret = bkey_is_inode(k.k); - bch2_trans_iter_exit(trans, &iter); - return ret; - } -} - /* * Prefer to delete the first one, since that will be the one at the wrong * offset: * return value: 0 -> delete k1, 1 -> delete k2 */ -static int hash_pick_winner(struct btree_trans *trans, - const struct bch_hash_desc desc, - struct bch_hash_info *hash_info, - struct bkey_s_c k1, - struct bkey_s_c k2) -{ - if (bkey_val_bytes(k1.k) == bkey_val_bytes(k2.k) && - !memcmp(k1.v, k2.v, bkey_val_bytes(k1.k))) - return 0; - - switch (desc.btree_id) { - case BTREE_ID_dirents: { - int ret = dirent_has_target(trans, bkey_s_c_to_dirent(k1)); - if (ret < 0) - return ret; - if (!ret) - return 0; - - ret = dirent_has_target(trans, bkey_s_c_to_dirent(k2)); - if (ret < 0) - return ret; - if (!ret) - return 1; - return 2; - } - default: - return 0; - } -} - -static int fsck_update_backpointers(struct btree_trans *trans, - struct snapshots_seen *s, - const struct bch_hash_desc desc, - struct bch_hash_info *hash_info, - struct bkey_i *new) +int bch2_fsck_update_backpointers(struct btree_trans *trans, + struct snapshots_seen *s, + const struct bch_hash_desc desc, + struct bch_hash_info *hash_info, + struct bkey_i *new) { if (new->k.type != KEY_TYPE_dirent) return 0; @@ -1019,160 +1023,6 @@ err: return ret; } -static int fsck_rename_dirent(struct btree_trans *trans, - struct snapshots_seen *s, - const struct bch_hash_desc desc, - struct bch_hash_info *hash_info, - struct bkey_s_c_dirent old) -{ - struct qstr old_name = 
bch2_dirent_get_name(old); - struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, bkey_bytes(old.k) + 32); - int ret = PTR_ERR_OR_ZERO(new); - if (ret) - return ret; - - bkey_dirent_init(&new->k_i); - dirent_copy_target(new, old); - new->k.p = old.k->p; - - for (unsigned i = 0; i < 1000; i++) { - unsigned len = sprintf(new->v.d_name, "%.*s.fsck_renamed-%u", - old_name.len, old_name.name, i); - unsigned u64s = BKEY_U64s + dirent_val_u64s(len); - - if (u64s > U8_MAX) - return -EINVAL; - - new->k.u64s = u64s; - - ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info, - (subvol_inum) { 0, old.k->p.inode }, - old.k->p.snapshot, &new->k_i, - BTREE_UPDATE_internal_snapshot_node); - if (!bch2_err_matches(ret, EEXIST)) - break; - } - - if (ret) - return ret; - - return fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i); -} - -static int hash_check_key(struct btree_trans *trans, - struct snapshots_seen *s, - const struct bch_hash_desc desc, - struct bch_hash_info *hash_info, - struct btree_iter *k_iter, struct bkey_s_c hash_k) -{ - struct bch_fs *c = trans->c; - struct btree_iter iter = { NULL }; - struct printbuf buf = PRINTBUF; - struct bkey_s_c k; - u64 hash; - int ret = 0; - - if (hash_k.k->type != desc.key_type) - return 0; - - hash = desc.hash_bkey(hash_info, hash_k); - - if (likely(hash == hash_k.k->p.offset)) - return 0; - - if (hash_k.k->p.offset < hash) - goto bad_hash; - - for_each_btree_key_norestart(trans, iter, desc.btree_id, - SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot), - BTREE_ITER_slots, k, ret) { - if (bkey_eq(k.k->p, hash_k.k->p)) - break; - - if (k.k->type == desc.key_type && - !desc.cmp_bkey(k, hash_k)) - goto duplicate_entries; - - if (bkey_deleted(k.k)) { - bch2_trans_iter_exit(trans, &iter); - goto bad_hash; - } - } -out: - bch2_trans_iter_exit(trans, &iter); - printbuf_exit(&buf); - return ret; -bad_hash: - if (fsck_err(trans, hash_table_key_wrong_offset, - "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n %s", - bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash, - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) { - struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, hash_k); - if (IS_ERR(new)) - return PTR_ERR(new); - - k = bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, hash_info, - (subvol_inum) { 0, hash_k.k->p.inode }, - hash_k.k->p.snapshot, new, - STR_HASH_must_create| - BTREE_ITER_with_updates| - BTREE_UPDATE_internal_snapshot_node); - ret = bkey_err(k); - if (ret) - goto out; - if (k.k) - goto duplicate_entries; - - ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, - BTREE_UPDATE_internal_snapshot_node) ?: - fsck_update_backpointers(trans, s, desc, hash_info, new) ?: - bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: - -BCH_ERR_transaction_restart_nested; - goto out; - } -fsck_err: - goto out; -duplicate_entries: - ret = hash_pick_winner(trans, desc, hash_info, hash_k, k); - if (ret < 0) - goto out; - - if (!fsck_err(trans, hash_table_key_duplicate, - "duplicate hash table keys%s:\n%s", - ret != 2 ? 
"" : ", both point to valid inodes", - (printbuf_reset(&buf), - bch2_bkey_val_to_text(&buf, c, hash_k), - prt_newline(&buf), - bch2_bkey_val_to_text(&buf, c, k), - buf.buf))) - goto out; - - switch (ret) { - case 0: - ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0); - break; - case 1: - ret = bch2_hash_delete_at(trans, desc, hash_info, &iter, 0); - break; - case 2: - ret = fsck_rename_dirent(trans, s, desc, hash_info, bkey_s_c_to_dirent(hash_k)) ?: - bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0); - goto out; - } - - ret = bch2_trans_commit(trans, NULL, NULL, 0) ?: - -BCH_ERR_transaction_restart_nested; - goto out; -} - -static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans, - struct btree_iter *iter, - struct bpos pos) -{ - return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent); -} - static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans, struct btree_iter *iter, struct bch_inode_unpacked *inode, @@ -1260,12 +1110,43 @@ static int get_snapshot_root_inode(struct btree_trans *trans, goto err; BUG(); found_root: - BUG_ON(bch2_inode_unpack(k, root)); + ret = bch2_inode_unpack(k, root); err: bch2_trans_iter_exit(trans, &iter); return ret; } +static int check_directory_size(struct btree_trans *trans, + struct bch_inode_unpacked *inode_u, + struct bkey_s_c inode_k, bool *write_inode) +{ + struct btree_iter iter; + struct bkey_s_c k; + u64 new_size = 0; + int ret; + + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_dirents, + SPOS(inode_k.k->p.offset, 0, inode_k.k->p.snapshot), + POS(inode_k.k->p.offset, U64_MAX), + 0, k, ret) { + if (k.k->type != KEY_TYPE_dirent) + continue; + + struct bkey_s_c_dirent dirent = bkey_s_c_to_dirent(k); + struct qstr name = bch2_dirent_get_name(dirent); + + new_size += dirent_occupied_size(&name); + } + bch2_trans_iter_exit(trans, &iter); + + if (!ret && inode_u->bi_size != new_size) { + inode_u->bi_size = new_size; + *write_inode = true; + } + + return ret; +} + static int check_inode(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, @@ -1291,7 +1172,9 @@ static int check_inode(struct btree_trans *trans, if (!bkey_is_inode(k.k)) return 0; - BUG_ON(bch2_inode_unpack(k, &u)); + ret = bch2_inode_unpack(k, &u); + if (ret) + goto err; if (snapshot_root->bi_inum != u.bi_inum) { ret = get_snapshot_root_inode(trans, snapshot_root, u.bi_inum); @@ -1302,7 +1185,7 @@ static int check_inode(struct btree_trans *trans, if (fsck_err_on(u.bi_hash_seed != snapshot_root->bi_hash_seed || INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root), trans, inode_snapshot_mismatch, - "inodes in different snapshots don't match")) { + "inode hash info in different snapshots don't match")) { u.bi_hash_seed = snapshot_root->bi_hash_seed; SET_INODE_STR_HASH(&u, INODE_STR_HASH(snapshot_root)); do_update = true; @@ -1392,7 +1275,7 @@ static int check_inode(struct btree_trans *trans, if (fsck_err_on(!ret, trans, inode_unlinked_and_not_open, - "inode %llu%u unlinked and not open", + "inode %llu:%u unlinked and not open", u.bi_inum, u.bi_snapshot)) { ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot); bch_err_msg(c, ret, "in fsck deleting inode"); @@ -1415,7 +1298,7 @@ static int check_inode(struct btree_trans *trans, if (u.bi_subvol) { struct bch_subvolume s; - ret = bch2_subvolume_get(trans, u.bi_subvol, false, 0, &s); + ret = bch2_subvolume_get(trans, u.bi_subvol, false, &s); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; @@ -1441,6 +1324,27 @@ static int 
check_inode(struct btree_trans *trans, do_update = true; } } + + if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal), + trans, inode_journal_seq_in_future, + "inode journal seq in future (currently at %llu)\n%s", + journal_cur_seq(&c->journal), + (printbuf_reset(&buf), + bch2_inode_unpacked_to_text(&buf, &u), + buf.buf))) { + u.bi_journal_seq = journal_cur_seq(&c->journal); + do_update = true; + } + + if (S_ISDIR(u.bi_mode)) { + ret = check_directory_size(trans, &u, k, &do_update); + + fsck_err_on(ret, + trans, directory_size_mismatch, + "directory inode %llu:%u with the mismatch directory size", + u.bi_inum, k.k->p.snapshot); + ret = 0; + } do_update: if (do_update) { ret = __bch2_fsck_write_inode(trans, &u); @@ -1502,7 +1406,9 @@ static int find_oldest_inode_needs_reattach(struct btree_trans *trans, break; struct bch_inode_unpacked parent_inode; - bch2_inode_unpack(k, &parent_inode); + ret = bch2_inode_unpack(k, &parent_inode); + if (ret) + break; if (!inode_should_reattach(&parent_inode)) break; @@ -1525,7 +1431,9 @@ static int check_unreachable_inode(struct btree_trans *trans, return 0; struct bch_inode_unpacked inode; - BUG_ON(bch2_inode_unpack(k, &inode)); + ret = bch2_inode_unpack(k, &inode); + if (ret) + return ret; if (!inode_should_reattach(&inode)) return 0; @@ -1649,7 +1557,7 @@ static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_wal if (i->count != count2) { bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu", w->last_pos.inode, i->snapshot, i->count, count2); - return -BCH_ERR_internal_fsck_err; + i->count = count2; } if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty), @@ -1753,7 +1661,7 @@ static int overlapping_extents_found(struct btree_trans *trans, bch2_trans_iter_init(trans, &iter1, btree, pos1, BTREE_ITER_all_snapshots| BTREE_ITER_not_extents); - k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX)); + k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX)); ret = bkey_err(k1); if (ret) goto err; @@ -1778,7 +1686,7 @@ static int overlapping_extents_found(struct btree_trans *trans, while (1) { bch2_btree_iter_advance(&iter2); - k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX)); + k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX)); ret = bkey_err(k2); if (ret) goto err; @@ -2156,7 +2064,7 @@ static int check_dirent_inode_dirent(struct btree_trans *trans, return __bch2_fsck_write_inode(trans, target); } - if (bch2_inode_should_have_bp(target) && + if (bch2_inode_should_have_single_bp(target) && !fsck_err(trans, inode_wrong_backpointer, "dirent points to inode that does not point back:\n %s", (bch2_bkey_val_to_text(&buf, c, d.s_c), @@ -2480,7 +2388,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, *hash_info = bch2_hash_info_init(c, &i->inode); dir->first_this_inode = false; - ret = hash_check_key(trans, s, bch2_dirent_hash_desc, hash_info, iter, k); + ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k); if (ret < 0) goto err; if (ret) { @@ -2519,6 +2427,30 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter, if (ret) goto err; } + + darray_for_each(target->deletes, i) + if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i), + trans, dirent_to_overwritten_inode, + "dirent points to inode overwritten in snapshot %u:\n%s", + *i, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, k), + buf.buf))) { + struct btree_iter delete_iter; + 
bch2_trans_iter_init(trans, &delete_iter, + BTREE_ID_dirents, + SPOS(k.k->p.inode, k.k->p.offset, *i), + BTREE_ITER_intent); + ret = bch2_btree_iter_traverse(&delete_iter) ?: + bch2_hash_delete_at(trans, bch2_dirent_hash_desc, + hash_info, + &delete_iter, + BTREE_UPDATE_internal_snapshot_node); + bch2_trans_iter_exit(trans, &delete_iter); + if (ret) + goto err; + + } } ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); @@ -2594,7 +2526,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter, *hash_info = bch2_hash_info_init(c, &i->inode); inode->first_this_inode = false; - ret = hash_check_key(trans, NULL, bch2_xattr_hash_desc, hash_info, iter, k); + ret = bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info, iter, k); bch_err_fn(c, ret); return ret; } @@ -2774,6 +2706,48 @@ struct pathbuf_entry { typedef DARRAY(struct pathbuf_entry) pathbuf; +static int bch2_bi_depth_renumber_one(struct btree_trans *trans, struct pathbuf_entry *p, + u32 new_depth) +{ + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, p->inum, p->snapshot), 0); + + struct bch_inode_unpacked inode; + int ret = bkey_err(k) ?: + !bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode + : bch2_inode_unpack(k, &inode); + if (ret) + goto err; + + if (inode.bi_depth != new_depth) { + inode.bi_depth = new_depth; + ret = __bch2_fsck_write_inode(trans, &inode) ?: + bch2_trans_commit(trans, NULL, NULL, 0); + } +err: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +static int bch2_bi_depth_renumber(struct btree_trans *trans, pathbuf *path, u32 new_bi_depth) +{ + u32 restart_count = trans->restart_count; + int ret = 0; + + darray_for_each_reverse(*path, i) { + ret = nested_lockrestart_do(trans, + bch2_bi_depth_renumber_one(trans, i, new_bi_depth)); + bch_err_fn(trans->c, ret); + if (ret) + break; + + new_bi_depth++; + } + + return ret ?: trans_was_restarted(trans, restart_count); +} + static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot) { darray_for_each(*p, i) @@ -2783,21 +2757,21 @@ static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot) return false; } -static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c inode_k) +static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k) { struct bch_fs *c = trans->c; struct btree_iter inode_iter = {}; - struct bch_inode_unpacked inode; + pathbuf path = {}; struct printbuf buf = PRINTBUF; u32 snapshot = inode_k.k->p.snapshot; + bool redo_bi_depth = false; + u32 min_bi_depth = U32_MAX; int ret = 0; - p->nr = 0; - - BUG_ON(bch2_inode_unpack(inode_k, &inode)); - - if (!S_ISDIR(inode.bi_mode)) - return 0; + struct bch_inode_unpacked inode; + ret = bch2_inode_unpack(inode_k, &inode); + if (ret) + return ret; while (!inode.bi_subvol) { struct btree_iter dirent_iter; @@ -2807,7 +2781,7 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino d = inode_get_dirent(trans, &dirent_iter, &inode, &parent_snapshot); ret = bkey_err(d.s_c); if (ret && !bch2_err_matches(ret, ENOENT)) - break; + goto out; if (!ret && (ret = dirent_points_to_inode(c, d, &inode))) bch2_trans_iter_exit(trans, &dirent_iter); @@ -2822,7 +2796,7 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino bch2_trans_iter_exit(trans, &dirent_iter); - ret = darray_push(p, ((struct pathbuf_entry) { + ret = darray_push(&path, ((struct pathbuf_entry) { .inum = inode.bi_inum, .snapshot = snapshot, })); @@ -2834,22 +2808,32 @@ 
static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino bch2_trans_iter_exit(trans, &inode_iter); inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, SPOS(0, inode.bi_dir, snapshot), 0); + + struct bch_inode_unpacked parent_inode; ret = bkey_err(inode_k) ?: !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode - : bch2_inode_unpack(inode_k, &inode); + : bch2_inode_unpack(inode_k, &parent_inode); if (ret) { /* Should have been caught in dirents pass */ bch_err_msg(c, ret, "error looking up parent directory"); - break; + goto out; } + min_bi_depth = parent_inode.bi_depth; + + if (parent_inode.bi_depth < inode.bi_depth && + min_bi_depth < U16_MAX) + break; + + inode = parent_inode; snapshot = inode_k.k->p.snapshot; + redo_bi_depth = true; - if (path_is_dup(p, inode.bi_inum, snapshot)) { + if (path_is_dup(&path, inode.bi_inum, snapshot)) { /* XXX print path */ bch_err(c, "directory structure loop"); - darray_for_each(*p, i) + darray_for_each(path, i) pr_err("%llu:%u", i->inum, i->snapshot); pr_err("%llu:%u", inode.bi_inum, snapshot); @@ -2862,12 +2846,20 @@ static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c ino ret = reattach_inode(trans, &inode); bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum); } - break; + + goto out; } } + + if (inode.bi_subvol) + min_bi_depth = 0; + + if (redo_bi_depth) + ret = bch2_bi_depth_renumber(trans, &path, min_bi_depth); out: fsck_err: bch2_trans_iter_exit(trans, &inode_iter); + darray_exit(&path); printbuf_exit(&buf); bch_err_fn(c, ret); return ret; @@ -2879,24 +2871,20 @@ fsck_err: */ int bch2_check_directory_structure(struct bch_fs *c) { - pathbuf path = { 0, }; - int ret; - - ret = bch2_trans_run(c, + int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN, BTREE_ITER_intent| BTREE_ITER_prefetch| BTREE_ITER_all_snapshots, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ - if (!bkey_is_inode(k.k)) + if (!S_ISDIR(bkey_inode_mode(k))) continue; if (bch2_inode_flags(k) & BCH_INODE_unlinked) continue; - check_path(trans, &path, k); + check_path_loop(trans, k); }))); - darray_exit(&path); bch_err_fn(c, ret); return ret; @@ -2994,7 +2982,9 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c, /* Should never fail, checked by bch2_inode_invalid: */ struct bch_inode_unpacked u; - BUG_ON(bch2_inode_unpack(k, &u)); + _ret3 = bch2_inode_unpack(k, &u); + if (_ret3) + break; /* * Backpointer and directory structure checks are sufficient for @@ -3072,7 +3062,9 @@ static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_ite if (!bkey_is_inode(k.k)) return 0; - BUG_ON(bch2_inode_unpack(k, &u)); + ret = bch2_inode_unpack(k, &u); + if (ret) + return ret; if (S_ISDIR(u.bi_mode)) return 0; @@ -3194,3 +3186,223 @@ int bch2_fix_reflink_p(struct bch_fs *c) bch_err_fn(c, ret); return ret; } + +#ifndef NO_BCACHEFS_CHARDEV + +struct fsck_thread { + struct thread_with_stdio thr; + struct bch_fs *c; + struct bch_opts opts; +}; + +static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr) +{ + struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr); + kfree(thr); +} + +static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio) +{ + struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); + struct bch_fs *c = thr->c; + + int ret = PTR_ERR_OR_ZERO(c); + if (ret) + return ret; + + ret = bch2_fs_start(thr->c); + if (ret) + goto err; + + if (test_bit(BCH_FS_errors_fixed, &c->flags)) { + 
bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name); + ret |= 1; + } + if (test_bit(BCH_FS_error, &c->flags)) { + bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name); + ret |= 4; + } +err: + bch2_fs_stop(c); + return ret; +} + +static const struct thread_with_stdio_ops bch2_offline_fsck_ops = { + .exit = bch2_fsck_thread_exit, + .fn = bch2_fsck_offline_thread_fn, +}; + +long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg) +{ + struct bch_ioctl_fsck_offline arg; + struct fsck_thread *thr = NULL; + darray_str(devs) = {}; + long ret = 0; + + if (copy_from_user(&arg, user_arg, sizeof(arg))) + return -EFAULT; + + if (arg.flags) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + for (size_t i = 0; i < arg.nr_devs; i++) { + u64 dev_u64; + ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64)); + if (ret) + goto err; + + char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX); + ret = PTR_ERR_OR_ZERO(dev_str); + if (ret) + goto err; + + ret = darray_push(&devs, dev_str); + if (ret) { + kfree(dev_str); + goto err; + } + } + + thr = kzalloc(sizeof(*thr), GFP_KERNEL); + if (!thr) { + ret = -ENOMEM; + goto err; + } + + thr->opts = bch2_opts_empty(); + + if (arg.opts) { + char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); + ret = PTR_ERR_OR_ZERO(optstr) ?: + bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr); + if (!IS_ERR(optstr)) + kfree(optstr); + + if (ret) + goto err; + } + + opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio); + opt_set(thr->opts, read_only, 1); + opt_set(thr->opts, ratelimit_errors, 0); + + /* We need request_key() to be called before we punt to kthread: */ + opt_set(thr->opts, nostart, true); + + bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops); + + thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts); + + if (!IS_ERR(thr->c) && + thr->c->opts.errors == BCH_ON_ERROR_panic) + thr->c->opts.errors = BCH_ON_ERROR_ro; + + ret = __bch2_run_thread_with_stdio(&thr->thr); +out: + darray_for_each(devs, i) + kfree(*i); + darray_exit(&devs); + return ret; +err: + if (thr) + bch2_fsck_thread_exit(&thr->thr); + pr_err("ret %s", bch2_err_str(ret)); + goto out; +} + +static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio) +{ + struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr); + struct bch_fs *c = thr->c; + + c->stdio_filter = current; + c->stdio = &thr->thr.stdio; + + /* + * XXX: can we figure out a way to do this without mucking with c->opts? 
+ */ + unsigned old_fix_errors = c->opts.fix_errors; + if (opt_defined(thr->opts, fix_errors)) + c->opts.fix_errors = thr->opts.fix_errors; + else + c->opts.fix_errors = FSCK_FIX_ask; + + c->opts.fsck = true; + set_bit(BCH_FS_fsck_running, &c->flags); + + c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info; + int ret = bch2_run_online_recovery_passes(c); + + clear_bit(BCH_FS_fsck_running, &c->flags); + bch_err_fn(c, ret); + + c->stdio = NULL; + c->stdio_filter = NULL; + c->opts.fix_errors = old_fix_errors; + + up(&c->online_fsck_mutex); + bch2_ro_ref_put(c); + return ret; +} + +static const struct thread_with_stdio_ops bch2_online_fsck_ops = { + .exit = bch2_fsck_thread_exit, + .fn = bch2_fsck_online_thread_fn, +}; + +long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg) +{ + struct fsck_thread *thr = NULL; + long ret = 0; + + if (arg.flags) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!bch2_ro_ref_tryget(c)) + return -EROFS; + + if (down_trylock(&c->online_fsck_mutex)) { + bch2_ro_ref_put(c); + return -EAGAIN; + } + + thr = kzalloc(sizeof(*thr), GFP_KERNEL); + if (!thr) { + ret = -ENOMEM; + goto err; + } + + thr->c = c; + thr->opts = bch2_opts_empty(); + + if (arg.opts) { + char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16); + + ret = PTR_ERR_OR_ZERO(optstr) ?: + bch2_parse_mount_opts(c, &thr->opts, NULL, optstr); + if (!IS_ERR(optstr)) + kfree(optstr); + + if (ret) + goto err; + } + + ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops); +err: + if (ret < 0) { + bch_err_fn(c, ret); + if (thr) + bch2_fsck_thread_exit(&thr->thr); + up(&c->online_fsck_mutex); + bch2_ro_ref_put(c); + } + return ret; +} + +#endif /* NO_BCACHEFS_CHARDEV */ diff --git a/fs/bcachefs/fsck.h b/fs/bcachefs/fsck.h index 1cca31011530..574948278cd4 100644 --- a/fs/bcachefs/fsck.h +++ b/fs/bcachefs/fsck.h @@ -2,6 +2,14 @@ #ifndef _BCACHEFS_FSCK_H #define _BCACHEFS_FSCK_H +#include "str_hash.h" + +int bch2_fsck_update_backpointers(struct btree_trans *, + struct snapshots_seen *, + const struct bch_hash_desc, + struct bch_hash_info *, + struct bkey_i *); + int bch2_check_inodes(struct bch_fs *); int bch2_check_extents(struct bch_fs *); int bch2_check_indirect_extents(struct bch_fs *); @@ -14,4 +22,7 @@ int bch2_check_directory_structure(struct bch_fs *); int bch2_check_nlinks(struct bch_fs *); int bch2_fix_reflink_p(struct bch_fs *); +long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *); +long bch2_ioctl_fsck_online(struct bch_fs *, struct bch_ioctl_fsck_online); + #endif /* _BCACHEFS_FSCK_H */ diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index 039cb7a22244..04ec05206f8c 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -14,6 +14,7 @@ #include "extent_update.h" #include "fs.h" #include "inode.h" +#include "opts.h" #include "str_hash.h" #include "snapshot.h" #include "subvolume.h" @@ -47,10 +48,10 @@ static int inode_decode_field(const u8 *in, const u8 *end, u8 *p; if (in >= end) - return -1; + return -BCH_ERR_inode_unpack_error; if (!*in) - return -1; + return -BCH_ERR_inode_unpack_error; /* * position of highest set bit indicates number of bytes: @@ -60,7 +61,7 @@ static int inode_decode_field(const u8 *in, const u8 *end, bytes = byte_table[shift - 1]; if (in + bytes > end) - return -1; + return -BCH_ERR_inode_unpack_error; p = (u8 *) be + 16 - bytes; memcpy(p, in, bytes); @@ -176,7 +177,7 @@ static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode, return ret; \ \ if 
(field_bits > sizeof(unpacked->_name) * 8) \ - return -1; \ + return -BCH_ERR_inode_unpack_error; \ \ unpacked->_name = field[1]; \ in += ret; @@ -217,7 +218,7 @@ static int bch2_inode_unpack_v2(struct bch_inode_unpacked *unpacked, \ unpacked->_name = v[0]; \ if (v[1] || v[0] != unpacked->_name) \ - return -1; \ + return -BCH_ERR_inode_unpack_error; \ fieldnr++; BCH_INODE_FIELDS_v2() @@ -268,7 +269,7 @@ static int bch2_inode_unpack_v3(struct bkey_s_c k, \ unpacked->_name = v[0]; \ if (v[1] || v[0] != unpacked->_name) \ - return -1; \ + return -BCH_ERR_inode_unpack_error; \ fieldnr++; BCH_INODE_FIELDS_v3() @@ -428,7 +429,7 @@ struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k) } static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bch_inode_unpacked unpacked; int ret = 0; @@ -468,7 +469,7 @@ fsck_err: } int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); int ret = 0; @@ -478,13 +479,13 @@ int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv1_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); int ret = 0; @@ -494,13 +495,13 @@ int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k); int ret = 0; @@ -518,7 +519,7 @@ int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k, "invalid str hash type (%llu >= %u)", INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR); - ret = __bch2_inode_validate(c, k, flags); + ret = __bch2_inode_validate(c, k, from); fsck_err: return ret; } @@ -617,7 +618,7 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter struct bkey_s_c k; int ret = 0; - for_each_btree_key_upto_norestart(trans, *iter, btree, + for_each_btree_key_max_norestart(trans, *iter, btree, bpos_successor(pos), SPOS(pos.inode, pos.offset, U32_MAX), flags|BTREE_ITER_all_snapshots, k, ret) @@ -652,7 +653,7 @@ int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos) struct bkey_s_c k; int ret = 0; - for_each_btree_key_upto_norestart(trans, iter, + for_each_btree_key_max_norestart(trans, iter, BTREE_ID_inodes, POS(0, pos.offset), bpos_predecessor(pos), BTREE_ITER_all_snapshots| BTREE_ITER_with_updates, k, ret) @@ -779,7 +780,7 @@ int bch2_trigger_inode(struct btree_trans *trans, } int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -798,6 +799,28 @@ void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c, prt_printf(out, "generation: %u", le32_to_cpu(gen.v->bi_generation)); } +int bch2_inode_alloc_cursor_validate(struct bch_fs *c, struct bkey_s_c k, + 
struct bkey_validate_context from) +{ + int ret = 0; + + bkey_fsck_err_on(k.k->p.inode != LOGGED_OPS_INUM_inode_cursors, + c, inode_alloc_cursor_inode_bad, + "k.p.inode bad"); +fsck_err: + return ret; +} + +void bch2_inode_alloc_cursor_to_text(struct printbuf *out, struct bch_fs *c, + struct bkey_s_c k) +{ + struct bkey_s_c_inode_alloc_cursor i = bkey_s_c_to_inode_alloc_cursor(k); + + prt_printf(out, "idx %llu generation %llu", + le64_to_cpu(i.v->idx), + le64_to_cpu(i.v->gen)); +} + void bch2_inode_init_early(struct bch_fs *c, struct bch_inode_unpacked *inode_u) { @@ -858,43 +881,78 @@ static inline u32 bkey_generation(struct bkey_s_c k) } } -/* - * This just finds an empty slot: - */ -int bch2_inode_create(struct btree_trans *trans, - struct btree_iter *iter, - struct bch_inode_unpacked *inode_u, - u32 snapshot, u64 cpu) +static struct bkey_i_inode_alloc_cursor * +bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *max) { struct bch_fs *c = trans->c; - struct bkey_s_c k; - u64 min, max, start, pos, *hint; - int ret = 0; - unsigned bits = (c->opts.inodes_32bit ? 31 : 63); - if (c->opts.shard_inode_numbers) { - bits -= c->inode_shard_bits; + u64 cursor_idx = c->opts.inodes_32bit ? 0 : cpu + 1; - min = (cpu << bits); - max = (cpu << bits) | ~(ULLONG_MAX << bits); + cursor_idx &= ~(~0ULL << c->opts.shard_inode_numbers_bits); - min = max_t(u64, min, BLOCKDEV_INODE_MAX); - hint = c->unused_inode_hints + cpu; + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, + BTREE_ID_logged_ops, + POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx), + BTREE_ITER_cached); + int ret = bkey_err(k); + if (ret) + return ERR_PTR(ret); + + struct bkey_i_inode_alloc_cursor *cursor = + k.k->type == KEY_TYPE_inode_alloc_cursor + ? bch2_bkey_make_mut_typed(trans, &iter, &k, 0, inode_alloc_cursor) + : bch2_bkey_alloc(trans, &iter, 0, inode_alloc_cursor); + ret = PTR_ERR_OR_ZERO(cursor); + if (ret) + goto err; + + if (c->opts.inodes_32bit) { + *min = BLOCKDEV_INODE_MAX; + *max = INT_MAX; } else { - min = BLOCKDEV_INODE_MAX; - max = ~(ULLONG_MAX << bits); - hint = c->unused_inode_hints; + cursor->v.bits = c->opts.shard_inode_numbers_bits; + + unsigned bits = 63 - c->opts.shard_inode_numbers_bits; + + *min = max(cpu << bits, (u64) INT_MAX + 1); + *max = (cpu << bits) | ~(ULLONG_MAX << bits); } - start = READ_ONCE(*hint); + if (le64_to_cpu(cursor->v.idx) < *min) + cursor->v.idx = cpu_to_le64(*min); - if (start >= max || start < min) - start = min; + if (le64_to_cpu(cursor->v.idx) >= *max) { + cursor->v.idx = cpu_to_le64(*min); + le32_add_cpu(&cursor->v.gen, 1); + } +err: + bch2_trans_iter_exit(trans, &iter); + return ret ? 
ERR_PTR(ret) : cursor; +} + +/* + * This just finds an empty slot: + */ +int bch2_inode_create(struct btree_trans *trans, + struct btree_iter *iter, + struct bch_inode_unpacked *inode_u, + u32 snapshot, u64 cpu) +{ + u64 min, max; + struct bkey_i_inode_alloc_cursor *cursor = + bch2_inode_alloc_cursor_get(trans, cpu, &min, &max); + int ret = PTR_ERR_OR_ZERO(cursor); + if (ret) + return ret; + + u64 start = le64_to_cpu(cursor->v.idx); + u64 pos = start; - pos = start; bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos), BTREE_ITER_all_snapshots| BTREE_ITER_intent); + struct bkey_s_c k; again: while ((k = bch2_btree_iter_peek(iter)).k && !(ret = bkey_err(k)) && @@ -924,6 +982,7 @@ again: /* Retry from start */ pos = start = min; bch2_btree_iter_set_pos(iter, POS(0, pos)); + le32_add_cpu(&cursor->v.gen, 1); goto again; found_slot: bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot)); @@ -934,9 +993,9 @@ found_slot: return ret; } - *hint = k.k->p.offset; inode_u->bi_inum = k.k->p.offset; - inode_u->bi_generation = bkey_generation(k); + inode_u->bi_generation = le64_to_cpu(cursor->v.gen); + cursor->v.idx = cpu_to_le64(k.k->p.offset + 1); return 0; } @@ -966,7 +1025,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans, bch2_btree_iter_set_snapshot(&iter, snapshot); - k = bch2_btree_iter_peek_upto(&iter, end); + k = bch2_btree_iter_peek_max(&iter, end); ret = bkey_err(k); if (ret) goto err; @@ -998,8 +1057,6 @@ int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) { struct btree_trans *trans = bch2_trans_get(c); struct btree_iter iter = { NULL }; - struct bkey_i_inode_generation delete; - struct bch_inode_unpacked inode_u; struct bkey_s_c k; u32 snapshot; int ret; @@ -1039,13 +1096,7 @@ retry: goto err; } - bch2_inode_unpack(k, &inode_u); - - bkey_inode_generation_init(&delete.k_i); - delete.k.p = iter.pos; - delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); - - ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?: + ret = bch2_btree_delete_at(trans, &iter, 0) ?: bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); err: @@ -1141,12 +1192,17 @@ struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode) void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c, struct bch_inode_unpacked *inode) { -#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name); +#define x(_name, _bits) \ + if ((inode)->bi_##_name) { \ + opts->_name = inode->bi_##_name - 1; \ + opts->_name##_from_inode = true; \ + } else { \ + opts->_name = c->opts._name; \ + } BCH_INODE_OPTS() #undef x - if (opts->nocow) - opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0; + bch2_io_opts_fixups(opts); } int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts) @@ -1380,7 +1436,8 @@ again: NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass); if (ret > 0) { - bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot); + bch_verbose_ratelimited(c, "deleting unlinked inode %llu:%u", + k.k->p.offset, k.k->p.snapshot); ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot); /* diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h index eab82b5eb897..428b9be6af34 100644 --- a/fs/bcachefs/inode.h +++ b/fs/bcachefs/inode.h @@ -7,15 +7,14 @@ #include "opts.h" #include "snapshot.h" -enum bch_validate_flags; extern const char * const bch2_inode_opts[]; int 
bch2_inode_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); int bch2_inode_v2_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); int bch2_inode_v3_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int __bch2_inode_has_child_snapshots(struct btree_trans *, struct bpos); @@ -60,7 +59,7 @@ static inline bool bkey_is_inode(const struct bkey *k) } int bch2_inode_generation_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \ @@ -69,6 +68,16 @@ void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bk .min_val_size = 8, \ }) +int bch2_inode_alloc_cursor_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); +void bch2_inode_alloc_cursor_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); + +#define bch2_bkey_ops_inode_alloc_cursor ((struct bkey_ops) { \ + .key_validate = bch2_inode_alloc_cursor_validate, \ + .val_to_text = bch2_inode_alloc_cursor_to_text, \ + .min_val_size = 16, \ +}) + #if 0 typedef struct { u64 lo; @@ -220,6 +229,20 @@ static inline u32 bch2_inode_flags(struct bkey_s_c k) } } +static inline unsigned bkey_inode_mode(struct bkey_s_c k) +{ + switch (k.k->type) { + case KEY_TYPE_inode: + return le16_to_cpu(bkey_s_c_to_inode(k).v->bi_mode); + case KEY_TYPE_inode_v2: + return le16_to_cpu(bkey_s_c_to_inode_v2(k).v->bi_mode); + case KEY_TYPE_inode_v3: + return INODEv3_MODE(bkey_s_c_to_inode_v3(k).v); + default: + return 0; + } +} + /* i_nlink: */ static inline unsigned nlink_bias(umode_t mode) @@ -249,7 +272,7 @@ static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi, int bch2_inode_nlink_inc(struct bch_inode_unpacked *); void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *); -static inline bool bch2_inode_should_have_bp(struct bch_inode_unpacked *inode) +static inline bool bch2_inode_should_have_single_bp(struct bch_inode_unpacked *inode) { bool inode_has_bp = inode->bi_dir || inode->bi_dir_offset; @@ -262,6 +285,16 @@ void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *, struct bch_inode_unpacked *); int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *); +#include "rebalance.h" + +static inline struct bch_extent_rebalance +bch2_inode_rebalance_opts_get(struct bch_fs *c, struct bch_inode_unpacked *inode) +{ + struct bch_io_opts io_opts; + bch2_inode_opts_get(&io_opts, c, inode); + return io_opts_to_rebalance_opts(c, &io_opts); +} + int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32); int bch2_delete_dead_inodes(struct bch_fs *); diff --git a/fs/bcachefs/inode_format.h b/fs/bcachefs/inode_format.h index 7928d0c6954f..b99a5bf1a75e 100644 --- a/fs/bcachefs/inode_format.h +++ b/fs/bcachefs/inode_format.h @@ -101,7 +101,9 @@ struct bch_inode_generation { x(bi_dir_offset, 64) \ x(bi_subvol, 32) \ x(bi_parent_subvol, 32) \ - x(bi_nocow, 8) + x(bi_nocow, 8) \ + x(bi_depth, 32) \ + x(bi_inodes_32bit, 8) /* subset of BCH_INODE_FIELDS */ #define BCH_INODE_OPTS() \ @@ -114,7 +116,8 @@ struct bch_inode_generation { x(foreground_target, 16) \ x(background_target, 16) \ x(erasure_code, 16) \ - x(nocow, 8) + x(nocow, 8) \ + 
x(inodes_32bit, 8) enum inode_opt_id { #define x(name, ...) \ @@ -164,4 +167,12 @@ LE64_BITMASK(INODEv3_FIELDS_START, struct bch_inode_v3, bi_flags, 31, 36); LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52); +struct bch_inode_alloc_cursor { + struct bch_val v; + __u8 bits; + __u8 pad; + __le32 gen; + __le64 idx; +}; + #endif /* _BCACHEFS_INODE_FORMAT_H */ diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c index f283051758d6..5353979117b0 100644 --- a/fs/bcachefs/io_misc.c +++ b/fs/bcachefs/io_misc.c @@ -113,11 +113,13 @@ int bch2_extent_fallocate(struct btree_trans *trans, err: if (!ret && sectors_allocated) bch2_increment_clock(c, sectors_allocated, WRITE); - if (should_print_err(ret)) - bch_err_inum_offset_ratelimited(c, - inum.inum, - iter->pos.offset << 9, - "%s(): error: %s", __func__, bch2_err_str(ret)); + if (should_print_err(ret)) { + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter->pos.offset << 9); + prt_printf(&buf, "fallocate error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } err_noprint: bch2_open_buckets_put(c, &open_buckets); bch2_disk_reservation_put(c, &disk_res); @@ -164,9 +166,9 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter, bch2_btree_iter_set_snapshot(iter, snapshot); /* - * peek_upto() doesn't have ideal semantics for extents: + * peek_max() doesn't have ideal semantics for extents: */ - k = bch2_btree_iter_peek_upto(iter, end_pos); + k = bch2_btree_iter_peek_max(iter, end_pos); if (!k.k) break; @@ -426,8 +428,8 @@ case LOGGED_OP_FINSERT_shift_extents: bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot)); k = insert - ? bch2_btree_iter_peek_prev(&iter) - : bch2_btree_iter_peek_upto(&iter, POS(inum.inum, U64_MAX)); + ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0)) + : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX)); if ((ret = bkey_err(k))) goto btree_err; @@ -461,7 +463,7 @@ case LOGGED_OP_FINSERT_shift_extents: op->v.pos = cpu_to_le64(insert ? 
bkey_start_offset(&delete.k) : delete.k.p.offset); - ret = bch2_bkey_set_needs_rebalance(c, copy, &opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &opts, copy) ?: bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?: bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?: bch2_logged_op_update(trans, &op->k_i) ?: diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c index b3b934a87c6d..8c7b2d3d779d 100644 --- a/fs/bcachefs/io_read.c +++ b/fs/bcachefs/io_read.c @@ -21,6 +21,7 @@ #include "io_read.h" #include "io_misc.h" #include "io_write.h" +#include "reflink.h" #include "subvolume.h" #include "trace.h" @@ -90,13 +91,18 @@ static const struct rhashtable_params bch_promote_params = { .automatic_shrinking = true, }; +static inline bool have_io_error(struct bch_io_failures *failed) +{ + return failed && failed->nr; +} + static inline int should_promote(struct bch_fs *c, struct bkey_s_c k, struct bpos pos, struct bch_io_opts opts, unsigned flags, struct bch_io_failures *failed) { - if (!failed) { + if (!have_io_error(failed)) { BUG_ON(!opts.promote_target); if (!(flags & BCH_READ_MAY_PROMOTE)) @@ -223,7 +229,7 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans, struct data_update_opts update_opts = {}; - if (!failed) { + if (!have_io_error(failed)) { update_opts.target = opts.promote_target; update_opts.extra_replicas = 1; update_opts.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED; @@ -231,11 +237,11 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans, update_opts.target = opts.foreground_target; struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); - unsigned i = 0; + unsigned ptr_bit = 1; bkey_for_each_ptr(ptrs, ptr) { if (bch2_dev_io_failures(failed, ptr->dev)) - update_opts.rewrite_ptrs |= BIT(i); - i++; + update_opts.rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; } } @@ -285,7 +291,7 @@ static struct promote_op *promote_alloc(struct btree_trans *trans, * if failed != NULL we're not actually doing a promote, we're * recovering from an io/checksum error */ - bool promote_full = (failed || + bool promote_full = (have_io_error(failed) || *read_full || READ_ONCE(c->opts.promote_whole_extents)); /* data might have to be decompressed in the write path: */ @@ -321,6 +327,20 @@ nopromote: /* Read */ +static int bch2_read_err_msg_trans(struct btree_trans *trans, struct printbuf *out, + struct bch_read_bio *rbio, struct bpos read_pos) +{ + return bch2_inum_offset_err_msg_trans(trans, out, + (subvol_inum) { rbio->subvol, read_pos.inode }, + read_pos.offset << 9); +} + +static void bch2_read_err_msg(struct bch_fs *c, struct printbuf *out, + struct bch_read_bio *rbio, struct bpos read_pos) +{ + bch2_trans_run(c, bch2_read_err_msg_trans(trans, out, rbio, read_pos)); +} + #define READ_RETRY_AVOID 1 #define READ_RETRY 2 #define READ_ERR 3 @@ -499,6 +519,29 @@ static void bch2_rbio_error(struct bch_read_bio *rbio, int retry, } } +static void bch2_read_io_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bio *bio = &rbio->bio; + struct bch_fs *c = rbio->c; + struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_printf(&buf, "data read error: %s", bch2_blk_status_to_str(bio->bi_status)); + + if (ca) { + bch2_io_error(ca, BCH_MEMBER_ERROR_read); + bch_err_ratelimited(ca, "%s", buf.buf); + } else { + bch_err_ratelimited(c, "%s", buf.buf); + } + + printbuf_exit(&buf); + bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status); +} + static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, struct bch_read_bio *rbio) { @@ -562,6 +605,73 @@ static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) __bch2_rbio_narrow_crcs(trans, rbio)); } +static void bch2_read_csum_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct bio *src = &rbio->bio; + struct bch_extent_crc_unpacked crc = rbio->pick.crc; + struct nonce nonce = extent_nonce(rbio->version, crc); + struct bch_csum csum = bch2_checksum_bio(c, crc.csum_type, nonce, src); + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "data "); + bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); + + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) { + bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); + bch_err_ratelimited(ca, "%s", buf.buf); + } else { + bch_err_ratelimited(c, "%s", buf.buf); + } + + bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); + printbuf_exit(&buf); +} + +static void bch2_read_decompress_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "decompression error"); + + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) + bch_err_ratelimited(ca, "%s", buf.buf); + else + bch_err_ratelimited(c, "%s", buf.buf); + + bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + printbuf_exit(&buf); +} + +static void bch2_read_decrypt_err(struct work_struct *work) +{ + struct bch_read_bio *rbio = + container_of(work, struct bch_read_bio, work); + struct bch_fs *c = rbio->c; + struct printbuf buf = PRINTBUF; + + bch2_read_err_msg(c, &buf, rbio, rbio->read_pos); + prt_str(&buf, "decrypt error"); + + struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; + if (ca) + bch_err_ratelimited(ca, "%s", buf.buf); + else + bch_err_ratelimited(c, "%s", buf.buf); + + bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + printbuf_exit(&buf); +} + /* Inner part that may run in process context */ static void __bch2_read_endio(struct work_struct *work) { @@ -668,33 +778,13 @@ csum_err: goto out; } - struct printbuf buf = PRINTBUF; - buf.atomic++; - prt_str(&buf, "data "); - bch2_csum_err_msg(&buf, crc.csum_type, rbio->pick.crc.csum, csum); - - struct bch_dev *ca = rbio->have_ioref ? 
bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL; - if (ca) { - bch_err_inum_offset_ratelimited(ca, - rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "data %s", buf.buf); - bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); - } - printbuf_exit(&buf); - bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_csum_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; decompression_err: - bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "decompression error"); - bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; decrypt_err: - bch_err_inum_offset_ratelimited(c, rbio->read_pos.inode, - rbio->read_pos.offset << 9, - "decrypt error"); - bch2_rbio_error(rbio, READ_ERR, BLK_STS_IOERR); + bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); goto out; } @@ -715,16 +805,8 @@ static void bch2_read_endio(struct bio *bio) if (!rbio->split) rbio->bio.bi_end_io = rbio->end_io; - if (bio->bi_status) { - if (ca) { - bch_err_inum_offset_ratelimited(ca, - rbio->read_pos.inode, - rbio->read_pos.offset, - "data read error: %s", - bch2_blk_status_to_str(bio->bi_status)); - bch2_io_error(ca, BCH_MEMBER_ERROR_read); - } - bch2_rbio_error(rbio, READ_RETRY_AVOID, bio->bi_status); + if (unlikely(bio->bi_status)) { + bch2_rbio_punt(rbio, bch2_read_io_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq); return; } @@ -750,45 +832,6 @@ static void bch2_read_endio(struct bio *bio) bch2_rbio_punt(rbio, __bch2_read_endio, context, wq); } -int __bch2_read_indirect_extent(struct btree_trans *trans, - unsigned *offset_into_extent, - struct bkey_buf *orig_k) -{ - struct btree_iter iter; - struct bkey_s_c k; - u64 reflink_offset; - int ret; - - reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) + - *offset_into_extent; - - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_reflink, - POS(0, reflink_offset), 0); - ret = bkey_err(k); - if (ret) - goto err; - - if (k.k->type != KEY_TYPE_reflink_v && - k.k->type != KEY_TYPE_indirect_inline_data) { - bch_err_inum_offset_ratelimited(trans->c, - orig_k->k->k.p.inode, - orig_k->k->k.p.offset << 9, - "%llu len %u points to nonexistent indirect extent %llu", - orig_k->k->k.p.offset, - orig_k->k->k.size, - reflink_offset); - bch2_inconsistent_error(trans->c); - ret = -BCH_ERR_missing_indirect_extent; - goto err; - } - - *offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); - bch2_bkey_buf_reassemble(orig_k, trans->c, k); -err: - bch2_trans_iter_exit(trans, &iter); - return ret; -} - static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans, struct bch_dev *ca, struct bkey_s_c k, @@ -868,15 +911,24 @@ retry_pick: if (!pick_ret) goto hole; - if (pick_ret < 0) { + if (unlikely(pick_ret < 0)) { + struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, orig, read_pos); + prt_printf(&buf, "no device to read from: %s\n ", bch2_err_str(pick_ret)); + bch2_bkey_val_to_text(&buf, c, k); + + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + goto err; + } + + if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20) { struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, orig, read_pos); + prt_printf(&buf, "attempting to read encrypted data without encryption key\n "); bch2_bkey_val_to_text(&buf, c, k); - bch_err_inum_offset_ratelimited(c, - read_pos.inode, read_pos.offset << 9, - "no device to 
read from: %s\n %s", - bch2_err_str(pick_ret), - buf.buf); + bch_err_ratelimited(c, "%s", buf.buf); printbuf_exit(&buf); goto err; } @@ -942,7 +994,7 @@ retry_pick: bounce = true; } - if (orig->opts.promote_target)// || failed) + if (orig->opts.promote_target || have_io_error(failed)) promote = promote_alloc(trans, iter, k, &pick, orig->opts, flags, &rbio, &bounce, &read_full, failed); @@ -1062,11 +1114,15 @@ get_bio: } if (!rbio->pick.idx) { - if (!rbio->have_ioref) { - bch_err_inum_offset_ratelimited(c, - read_pos.inode, - read_pos.offset << 9, - "no device to read from"); + if (unlikely(!rbio->have_ioref)) { + struct printbuf buf = PRINTBUF; + bch2_read_err_msg_trans(trans, &buf, rbio, read_pos); + prt_printf(&buf, "no device to read from:\n "); + bch2_bkey_val_to_text(&buf, c, k); + + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR); goto out; } @@ -1164,7 +1220,6 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, BTREE_ITER_slots); while (1) { - unsigned bytes, sectors, offset_into_extent; enum btree_id data_btree = BTREE_ID_extents; bch2_trans_begin(trans); @@ -1184,9 +1239,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, if (ret) goto err; - offset_into_extent = iter.pos.offset - + s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); - sectors = k.k->size - offset_into_extent; + unsigned sectors = k.k->size - offset_into_extent; bch2_bkey_buf_reassemble(&sk, c, k); @@ -1201,9 +1256,9 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, * With indirect extents, the amount of data to read is the min * of the original extent and the indirect extent: */ - sectors = min(sectors, k.k->size - offset_into_extent); + sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent); - bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9; + unsigned bytes = min(sectors, bvec_iter_sectors(bvec_iter)) << 9; swap(bvec_iter.bi_size, bytes); if (bvec_iter.bi_size == bytes) @@ -1229,16 +1284,20 @@ err: } bch2_trans_iter_exit(trans, &iter); - bch2_trans_put(trans); - bch2_bkey_buf_exit(&sk, c); if (ret) { - bch_err_inum_offset_ratelimited(c, inum.inum, - bvec_iter.bi_sector << 9, - "read error %i from btree lookup", ret); + struct printbuf buf = PRINTBUF; + bch2_inum_offset_err_msg_trans(trans, &buf, inum, bvec_iter.bi_sector << 9); + prt_printf(&buf, "read error %i from btree lookup", ret); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + rbio->bio.bi_status = BLK_STS_IOERR; bch2_rbio_done(rbio); } + + bch2_trans_put(trans); + bch2_bkey_buf_exit(&sk, c); } void bch2_fs_io_read_exit(struct bch_fs *c) diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h index d9c18bb7d403..a82e8a94ccb6 100644 --- a/fs/bcachefs/io_read.h +++ b/fs/bcachefs/io_read.h @@ -3,6 +3,7 @@ #define _BCACHEFS_IO_READ_H #include "bkey_buf.h" +#include "reflink.h" struct bch_read_bio { struct bch_fs *c; @@ -79,19 +80,32 @@ struct bch_devs_mask; struct cache_promote_op; struct extent_ptr_decoded; -int __bch2_read_indirect_extent(struct btree_trans *, unsigned *, - struct bkey_buf *); - static inline int bch2_read_indirect_extent(struct btree_trans *trans, enum btree_id *data_btree, - unsigned *offset_into_extent, - struct bkey_buf *k) + s64 *offset_into_extent, + struct bkey_buf *extent) { - if (k->k->k.type != KEY_TYPE_reflink_p) + if (extent->k->k.type != KEY_TYPE_reflink_p) return 0; *data_btree = BTREE_ID_reflink; - return __bch2_read_indirect_extent(trans, 
offset_into_extent, k); + struct btree_iter iter; + struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter, + offset_into_extent, + bkey_i_to_s_c_reflink_p(extent->k), + true, 0); + int ret = bkey_err(k); + if (ret) + return ret; + + if (bkey_deleted(k.k)) { + bch2_trans_iter_exit(trans, &iter); + return -BCH_ERR_missing_indirect_extent; + } + + bch2_bkey_buf_reassemble(extent, trans->c, k); + bch2_trans_iter_exit(trans, &iter); + return 0; } enum bch_read_flags { diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c index 96720adcfee0..03892388832b 100644 --- a/fs/bcachefs/io_write.c +++ b/fs/bcachefs/io_write.c @@ -164,7 +164,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans, bch2_trans_copy_iter(&iter, extent_iter); - for_each_btree_key_upto_continue_norestart(iter, + for_each_btree_key_max_continue_norestart(iter, new->k.p, BTREE_ITER_slots, old, ret) { s64 sectors = min(new->k.p.offset, old.k->p.offset) - max(bkey_start_offset(&new->k), @@ -216,6 +216,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, SPOS(0, extent_iter->pos.inode, extent_iter->snapshot), + BTREE_ITER_intent| BTREE_ITER_cached); int ret = bkey_err(k); if (unlikely(ret)) @@ -369,7 +370,7 @@ static int bch2_write_index_default(struct bch_write_op *op) bkey_start_pos(&sk.k->k), BTREE_ITER_slots|BTREE_ITER_intent); - ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?: bch2_extent_update(trans, inum, &iter, sk.k, &op->res, op->new_i_size, &op->i_sectors_delta, @@ -395,6 +396,31 @@ static int bch2_write_index_default(struct bch_write_op *op) /* Writes */ +static void __bch2_write_op_error(struct printbuf *out, struct bch_write_op *op, + u64 offset) +{ + bch2_inum_offset_err_msg(op->c, out, + (subvol_inum) { op->subvol, op->pos.inode, }, + offset << 9); + prt_printf(out, "write error%s: ", + op->flags & BCH_WRITE_MOVE ? "(internal move)" : ""); +} + +void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op) +{ + __bch2_write_op_error(out, op, op->pos.offset); +} + +static void bch2_write_op_error_trans(struct btree_trans *trans, struct printbuf *out, + struct bch_write_op *op, u64 offset) +{ + bch2_inum_offset_err_msg_trans(trans, out, + (subvol_inum) { op->subvol, op->pos.inode, }, + offset << 9); + prt_printf(out, "write error%s: ", + op->flags & BCH_WRITE_MOVE ? "(internal move)" : ""); +} + void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, enum bch_data_type type, const struct bkey_i *k, @@ -531,14 +557,14 @@ static void __bch2_write_index(struct bch_write_op *op) op->written += sectors_start - keylist_sectors(keys); - if (ret && !bch2_err_matches(ret, EROFS)) { + if (unlikely(ret && !bch2_err_matches(ret, EROFS))) { struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); - bch_err_inum_offset_ratelimited(c, - insert->k.p.inode, insert->k.p.offset << 9, - "%s write error while doing btree update: %s", - op->flags & BCH_WRITE_MOVE ? 
"move" : "user", - bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + __bch2_write_op_error(&buf, op, bkey_start_offset(&insert->k)); + prt_printf(&buf, "btree update error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); } if (ret) @@ -621,9 +647,7 @@ void bch2_write_point_do_index_updates(struct work_struct *work) while (1) { spin_lock_irq(&wp->writes_lock); - op = list_first_entry_or_null(&wp->writes, struct bch_write_op, wp_list); - if (op) - list_del(&op->wp_list); + op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list); wp_update_state(wp, op != NULL); spin_unlock_irq(&wp->writes_lock); @@ -859,7 +883,7 @@ static enum prep_encoded_ret { if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io) return PREP_ENCODED_CHECKSUM_ERR; - if (bch2_bio_uncompress_inplace(c, bio, &op->crc)) + if (bch2_bio_uncompress_inplace(op, bio)) return PREP_ENCODED_ERR; } @@ -1080,11 +1104,14 @@ do_write: *_dst = dst; return more; csum_err: - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s write error: error verifying existing checksum while rewriting existing data (memory corruption?)", - op->flags & BCH_WRITE_MOVE ? "move" : "user"); + { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "error verifying existing checksum while rewriting existing data (memory corruption?)"); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } + ret = -EIO; err: if (to_wbio(dst)->bounce) @@ -1165,7 +1192,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) struct btree_trans *trans = bch2_trans_get(c); for_each_keylist_key(&op->insert_keys, orig) { - int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents, + int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents, bkey_start_pos(&orig->k), orig->k.p, BTREE_ITER_intent, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({ @@ -1175,11 +1202,11 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) if (ret && !bch2_err_matches(ret, EROFS)) { struct bkey_i *insert = bch2_keylist_front(&op->insert_keys); - bch_err_inum_offset_ratelimited(c, - insert->k.p.inode, insert->k.p.offset << 9, - "%s write error while doing btree update: %s", - op->flags & BCH_WRITE_MOVE ? "move" : "user", - bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + bch2_write_op_error_trans(trans, &buf, op, bkey_start_offset(&insert->k)); + prt_printf(&buf, "btree update error: %s", bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); } if (ret) { @@ -1339,17 +1366,19 @@ err: if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto retry; + bch2_trans_put(trans); + darray_exit(&buckets); + if (ret) { - bch_err_inum_offset_ratelimited(c, - op->pos.inode, op->pos.offset << 9, - "%s: btree lookup error %s", __func__, bch2_err_str(ret)); + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "%s(): btree lookup error: %s", __func__, bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); op->error = ret; op->flags |= BCH_WRITE_SUBMITTED; } - bch2_trans_put(trans); - darray_exit(&buckets); - /* fallback to cow write path? 
*/ if (!(op->flags & BCH_WRITE_SUBMITTED)) { closure_sync(&op->cl); @@ -1462,14 +1491,14 @@ err: if (ret <= 0) { op->flags |= BCH_WRITE_SUBMITTED; - if (ret < 0) { - if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s(): %s error: %s", __func__, - op->flags & BCH_WRITE_MOVE ? "move" : "user", - bch2_err_str(ret)); + if (unlikely(ret < 0)) { + if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "%s(): %s", __func__, bch2_err_str(ret)); + bch_err_ratelimited(c, "%s", buf.buf); + printbuf_exit(&buf); + } op->error = ret; break; } @@ -1595,12 +1624,11 @@ CLOSURE_CALLBACK(bch2_write) bch2_keylist_init(&op->insert_keys, op->inline_keys); wbio_init(bio)->put_bio = false; - if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) { - bch_err_inum_offset_ratelimited(c, - op->pos.inode, - op->pos.offset << 9, - "%s write error: misaligned write", - op->flags & BCH_WRITE_MOVE ? "move" : "user"); + if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) { + struct printbuf buf = PRINTBUF; + bch2_write_op_error(&buf, op); + prt_printf(&buf, "misaligned write"); + printbuf_exit(&buf); op->error = -EIO; goto err; } diff --git a/fs/bcachefs/io_write.h b/fs/bcachefs/io_write.h index 5400ce94ee57..b4626013abc8 100644 --- a/fs/bcachefs/io_write.h +++ b/fs/bcachefs/io_write.h @@ -20,6 +20,8 @@ static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *, enum bch_data_type, const struct bkey_i *, bool); +void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op); + #define BCH_WRITE_FLAGS() \ x(ALLOC_NOWAIT) \ x(CACHED) \ diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c index 2dc0d60c1745..24c294d4634e 100644 --- a/fs/bcachefs/journal.c +++ b/fs/bcachefs/journal.c @@ -113,11 +113,10 @@ journal_seq_to_buf(struct journal *j, u64 seq) static void journal_pin_list_init(struct journal_entry_pin_list *p, int count) { - unsigned i; - - for (i = 0; i < ARRAY_SIZE(p->list); i++) - INIT_LIST_HEAD(&p->list[i]); - INIT_LIST_HEAD(&p->flushed); + for (unsigned i = 0; i < ARRAY_SIZE(p->unflushed); i++) + INIT_LIST_HEAD(&p->unflushed[i]); + for (unsigned i = 0; i < ARRAY_SIZE(p->flushed); i++) + INIT_LIST_HEAD(&p->flushed[i]); atomic_set(&p->count, count); p->devs.nr = 0; } @@ -217,6 +216,12 @@ void bch2_journal_buf_put_final(struct journal *j, u64 seq) if (__bch2_journal_pin_put(j, seq)) bch2_journal_reclaim_fast(j); bch2_journal_do_writes(j); + + /* + * for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an + * open journal entry + */ + wake_up(&j->wait); } /* @@ -251,6 +256,9 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t if (!__journal_entry_is_open(old)) return; + if (old.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) + old.cur_entry_offset = j->cur_entry_offset_if_blocked; + /* Close out old buffer: */ buf->data->u64s = cpu_to_le32(old.cur_entry_offset); @@ -311,6 +319,16 @@ void bch2_journal_halt(struct journal *j) spin_unlock(&j->lock); } +void bch2_journal_halt_locked(struct journal *j) +{ + lockdep_assert_held(&j->lock); + + __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true); + if (!j->err_seq) + j->err_seq = journal_cur_seq(j); + journal_wake(j); +} + static bool journal_entry_want_write(struct journal *j) { bool ret = !journal_entry_is_open(j) || @@ -373,6 +391,13 @@ static int 
journal_entry_open(struct journal *j) if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf)) return JOURNAL_ERR_max_in_flight; + if (journal_cur_seq(j) >= JOURNAL_SEQ_MAX) { + bch_err(c, "cannot start: journal seq overflow"); + if (bch2_fs_emergency_read_only_locked(c)) + bch_err(c, "fatal error - emergency read only"); + return JOURNAL_ERR_insufficient_devices; /* -EROFS */ + } + BUG_ON(!j->cur_entry_sectors); buf->expires = @@ -588,6 +613,16 @@ out: : -BCH_ERR_journal_res_get_blocked; } +static unsigned max_dev_latency(struct bch_fs *c) +{ + u64 nsecs = 0; + + for_each_rw_member(c, ca) + nsecs = max(nsecs, ca->io_latency[WRITE].stats.max_duration); + + return nsecs_to_jiffies(nsecs); +} + /* * Essentially the entry function to the journaling code. When bcachefs is doing * a btree insert, it calls this function to get the current journal write. @@ -599,17 +634,31 @@ out: * btree node write locks. */ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res, - unsigned flags) + unsigned flags, + struct btree_trans *trans) { int ret; if (closure_wait_event_timeout(&j->async_wait, (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked || (flags & JOURNAL_RES_GET_NONBLOCK), - HZ * 10)) + HZ)) return ret; + if (trans) + bch2_trans_unlock_long(trans); + struct bch_fs *c = container_of(j, struct bch_fs, journal); + int remaining_wait = max(max_dev_latency(c) * 2, HZ * 10); + + remaining_wait = max(0, remaining_wait - HZ); + + if (closure_wait_event_timeout(&j->async_wait, + (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked || + (flags & JOURNAL_RES_GET_NONBLOCK), + remaining_wait)) + return ret; + struct printbuf buf = PRINTBUF; bch2_journal_debug_to_text(&buf, j); bch_err(c, "Journal stuck? 
Waited for 10 seconds...\n%s", @@ -664,7 +713,7 @@ out: * @seq: seq to flush * @parent: closure object to wait with * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed, - * -EIO if @seq will never be flushed + * -BCH_ERR_journal_flush_err if @seq will never be flushed * * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if * necessary @@ -687,7 +736,7 @@ int bch2_journal_flush_seq_async(struct journal *j, u64 seq, /* Recheck under lock: */ if (j->err_seq && seq >= j->err_seq) { - ret = -EIO; + ret = -BCH_ERR_journal_flush_err; goto out; } @@ -714,7 +763,7 @@ recheck_need_open: * livelock: */ sched_annotate_sleep(); - ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0); + ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL); if (ret) return ret; @@ -747,6 +796,7 @@ recheck_need_open: } buf->must_flush = true; + j->flushing_seq = max(j->flushing_seq, seq); if (parent && !closure_wait(&buf->wait, parent)) BUG(); @@ -794,10 +844,11 @@ int bch2_journal_flush(struct journal *j) } /* - * bch2_journal_noflush_seq - tell the journal not to issue any flushes before + * bch2_journal_noflush_seq - ask the journal not to issue any flushes in the + * range [start, end) * @seq */ -bool bch2_journal_noflush_seq(struct journal *j, u64 seq) +bool bch2_journal_noflush_seq(struct journal *j, u64 start, u64 end) { struct bch_fs *c = container_of(j, struct bch_fs, journal); u64 unwritten_seq; @@ -806,15 +857,15 @@ bool bch2_journal_noflush_seq(struct journal *j, u64 seq) if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush))) return false; - if (seq <= c->journal.flushed_seq_ondisk) + if (c->journal.flushed_seq_ondisk >= start) return false; spin_lock(&j->lock); - if (seq <= c->journal.flushed_seq_ondisk) + if (c->journal.flushed_seq_ondisk >= start) goto out; for (unwritten_seq = journal_last_unwritten_seq(j); - unwritten_seq < seq; + unwritten_seq < end; unwritten_seq++) { struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq); @@ -831,19 +882,14 @@ out: return ret; } -int bch2_journal_meta(struct journal *j) +static int __bch2_journal_meta(struct journal *j) { - struct journal_buf *buf; - struct journal_res res; - int ret; - - memset(&res, 0, sizeof(res)); - - ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0); + struct journal_res res = {}; + int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL); if (ret) return ret; - buf = j->buf + (res.seq & JOURNAL_BUF_MASK); + struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK); buf->must_flush = true; if (!buf->flush_time) { @@ -856,27 +902,70 @@ int bch2_journal_meta(struct journal *j) return bch2_journal_flush_seq(j, res.seq, TASK_UNINTERRUPTIBLE); } +int bch2_journal_meta(struct journal *j) +{ + struct bch_fs *c = container_of(j, struct bch_fs, journal); + + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal)) + return -EROFS; + + int ret = __bch2_journal_meta(j); + bch2_write_ref_put(c, BCH_WRITE_REF_journal); + return ret; +} + /* block/unlock the journal: */ void bch2_journal_unblock(struct journal *j) { spin_lock(&j->lock); - j->blocked--; + if (!--j->blocked && + j->cur_entry_offset_if_blocked < JOURNAL_ENTRY_CLOSED_VAL && + j->reservations.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL) { + union journal_res_state old, new; + + old.v = atomic64_read(&j->reservations.counter); + do { + new.v = old.v; + new.cur_entry_offset = j->cur_entry_offset_if_blocked; + } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); + } spin_unlock(&j->lock); 
journal_wake(j); } +static void __bch2_journal_block(struct journal *j) +{ + if (!j->blocked++) { + union journal_res_state old, new; + + old.v = atomic64_read(&j->reservations.counter); + do { + j->cur_entry_offset_if_blocked = old.cur_entry_offset; + + if (j->cur_entry_offset_if_blocked >= JOURNAL_ENTRY_CLOSED_VAL) + break; + + new.v = old.v; + new.cur_entry_offset = JOURNAL_ENTRY_BLOCKED_VAL; + } while (!atomic64_try_cmpxchg(&j->reservations.counter, &old.v, new.v)); + + journal_cur_buf(j)->data->u64s = cpu_to_le32(old.cur_entry_offset); + } +} + void bch2_journal_block(struct journal *j) { spin_lock(&j->lock); - j->blocked++; + __bch2_journal_block(j); spin_unlock(&j->lock); journal_quiesce(j); } -static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq) +static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, + u64 max_seq, bool *blocked) { struct journal_buf *ret = NULL; @@ -893,13 +982,17 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou struct journal_buf *buf = j->buf + idx; if (buf->need_flush_to_write_buffer) { - if (seq == journal_cur_seq(j)) - __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true); - union journal_res_state s; s.v = atomic64_read_acquire(&j->reservations.counter); - ret = journal_state_count(s, idx) + unsigned open = seq == journal_cur_seq(j) && __journal_entry_is_open(s); + + if (open && !*blocked) { + __bch2_journal_block(j); + *blocked = true; + } + + ret = journal_state_count(s, idx) > open ? ERR_PTR(-EAGAIN) : buf; break; @@ -912,11 +1005,17 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou return ret; } -struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq) +struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, + u64 max_seq, bool *blocked) { struct journal_buf *ret; + *blocked = false; + + wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, + max_seq, blocked)) != ERR_PTR(-EAGAIN)); + if (IS_ERR_OR_NULL(ret) && *blocked) + bch2_journal_unblock(j); - wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN)); return ret; } @@ -945,19 +1044,17 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, } for (nr_got = 0; nr_got < nr_want; nr_got++) { - if (new_fs) { - bu[nr_got] = bch2_bucket_alloc_new_fs(ca); - if (bu[nr_got] < 0) { - ret = -BCH_ERR_ENOSPC_bucket_alloc; - break; - } - } else { - ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, - BCH_DATA_journal, cl); - ret = PTR_ERR_OR_ZERO(ob[nr_got]); - if (ret) - break; + enum bch_watermark watermark = new_fs + ? 
BCH_WATERMARK_btree + : BCH_WATERMARK_normal; + + ob[nr_got] = bch2_bucket_alloc(c, ca, watermark, + BCH_DATA_journal, cl); + ret = PTR_ERR_OR_ZERO(ob[nr_got]); + if (ret) + break; + if (!new_fs) { ret = bch2_trans_run(c, bch2_trans_mark_metadata_bucket(trans, ca, ob[nr_got]->bucket, BCH_DATA_journal, @@ -967,9 +1064,9 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, bch_err_msg(c, ret, "marking new journal buckets"); break; } - - bu[nr_got] = ob[nr_got]->bucket; } + + bu[nr_got] = ob[nr_got]->bucket; } if (!nr_got) @@ -1009,8 +1106,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr, if (ret) goto err_unblock; - if (!new_fs) - bch2_write_super(c); + bch2_write_super(c); /* Commit: */ if (c) @@ -1044,9 +1140,8 @@ err_unblock: bu[i], BCH_DATA_free, 0, BTREE_TRIGGER_transactional)); err_free: - if (!new_fs) - for (i = 0; i < nr_got; i++) - bch2_open_bucket_put(c, ob[i]); + for (i = 0; i < nr_got; i++) + bch2_open_bucket_put(c, ob[i]); kfree(new_bucket_seq); kfree(new_buckets); @@ -1193,7 +1288,7 @@ void bch2_fs_journal_stop(struct journal *j) * Always write a new journal entry, to make sure the clock hands are up * to date (and match the superblock) */ - bch2_journal_meta(j); + __bch2_journal_meta(j); journal_quiesce(j); cancel_delayed_work_sync(&j->write_work); @@ -1217,6 +1312,11 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq) bool had_entries = false; u64 last_seq = cur_seq, nr, seq; + if (cur_seq >= JOURNAL_SEQ_MAX) { + bch_err(c, "cannot start: journal seq overflow"); + return -EINVAL; + } + genradix_for_each_reverse(&c->journal_entries, iter, _i) { i = *_i; @@ -1474,6 +1574,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) case JOURNAL_ENTRY_CLOSED_VAL: prt_printf(out, "closed\n"); break; + case JOURNAL_ENTRY_BLOCKED_VAL: + prt_printf(out, "blocked\n"); + break; default: prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s); break; @@ -1499,6 +1602,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) printbuf_indent_sub(out, 2); for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) { + if (!ca->mi.durability) + continue; + struct journal_device *ja = &ca->journal; if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d)) @@ -1508,6 +1614,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) continue; prt_printf(out, "dev %u:\n", ca->dev_idx); + prt_printf(out, "durability %u:\n", ca->mi.durability); printbuf_indent_add(out, 2); prt_printf(out, "nr\t%u\n", ja->nr); prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size); @@ -1519,6 +1626,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) printbuf_indent_sub(out, 2); } + prt_printf(out, "replicas want %u need %u\n", c->opts.metadata_replicas, c->opts.metadata_replicas_required); + rcu_read_unlock(); --out->atomic; @@ -1530,54 +1639,3 @@ void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j) __bch2_journal_debug_to_text(out, j); spin_unlock(&j->lock); } - -bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq) -{ - struct journal_entry_pin_list *pin_list; - struct journal_entry_pin *pin; - - spin_lock(&j->lock); - if (!test_bit(JOURNAL_running, &j->flags)) { - spin_unlock(&j->lock); - return true; - } - - *seq = max(*seq, j->pin.front); - - if (*seq >= j->pin.back) { - spin_unlock(&j->lock); - return true; - } - - out->atomic++; - - pin_list = journal_seq_pin(j, *seq); - - prt_printf(out, 
"%llu: count %u\n", *seq, atomic_read(&pin_list->count)); - printbuf_indent_add(out, 2); - - for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++) - list_for_each_entry(pin, &pin_list->list[i], list) - prt_printf(out, "\t%px %ps\n", pin, pin->flush); - - if (!list_empty(&pin_list->flushed)) - prt_printf(out, "flushed:\n"); - - list_for_each_entry(pin, &pin_list->flushed, list) - prt_printf(out, "\t%px %ps\n", pin, pin->flush); - - printbuf_indent_sub(out, 2); - - --out->atomic; - spin_unlock(&j->lock); - - return false; -} - -void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j) -{ - u64 seq = 0; - - while (!bch2_journal_seq_pins_to_text(out, j, &seq)) - seq++; -} diff --git a/fs/bcachefs/journal.h b/fs/bcachefs/journal.h index 2762be6f9814..107f7f901cd9 100644 --- a/fs/bcachefs/journal.h +++ b/fs/bcachefs/journal.h @@ -285,7 +285,8 @@ static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq spin_lock(&j->lock); bch2_journal_buf_put_final(j, seq); spin_unlock(&j->lock); - } + } else if (unlikely(s.cur_entry_offset == JOURNAL_ENTRY_BLOCKED_VAL)) + wake_up(&j->wait); } /* @@ -311,7 +312,7 @@ static inline void bch2_journal_res_put(struct journal *j, } int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *, - unsigned); + unsigned, struct btree_trans *); /* First bits for BCH_WATERMARK: */ enum journal_res_flags { @@ -367,7 +368,8 @@ static inline int journal_res_get_fast(struct journal *j, } static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res, - unsigned u64s, unsigned flags) + unsigned u64s, unsigned flags, + struct btree_trans *trans) { int ret; @@ -379,7 +381,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re if (journal_res_get_fast(j, res, flags)) goto out; - ret = bch2_journal_res_get_slowpath(j, res, flags); + ret = bch2_journal_res_get_slowpath(j, res, flags, trans); if (ret) return ret; out: @@ -403,15 +405,16 @@ void bch2_journal_flush_async(struct journal *, struct closure *); int bch2_journal_flush_seq(struct journal *, u64, unsigned); int bch2_journal_flush(struct journal *); -bool bch2_journal_noflush_seq(struct journal *, u64); +bool bch2_journal_noflush_seq(struct journal *, u64, u64); int bch2_journal_meta(struct journal *); void bch2_journal_halt(struct journal *); +void bch2_journal_halt_locked(struct journal *); static inline int bch2_journal_error(struct journal *j) { return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL - ? -EIO : 0; + ? 
-BCH_ERR_journal_shutdown : 0; } struct bch_dev; @@ -424,12 +427,10 @@ static inline void bch2_journal_set_replay_done(struct journal *j) void bch2_journal_unblock(struct journal *); void bch2_journal_block(struct journal *); -struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq); +struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *, u64, bool *); void __bch2_journal_debug_to_text(struct printbuf *, struct journal *); void bch2_journal_debug_to_text(struct printbuf *, struct journal *); -void bch2_journal_pins_to_text(struct printbuf *, struct journal *); -bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *); int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *, unsigned nr); diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c index fb35dd336331..11c39e0c34f4 100644 --- a/fs/bcachefs/journal_io.c +++ b/fs/bcachefs/journal_io.c @@ -17,6 +17,9 @@ #include "sb-clean.h" #include "trace.h" +#include <linux/ioprio.h> +#include <linux/string_choices.h> + void bch2_journal_pos_from_member_info_set(struct bch_fs *c) { lockdep_assert_held(&c->sb_lock); @@ -299,7 +302,7 @@ static void journal_entry_err_msg(struct printbuf *out, journal_entry_err_msg(&_buf, version, jset, entry); \ prt_printf(&_buf, msg, ##__VA_ARGS__); \ \ - switch (flags & BCH_VALIDATE_write) { \ + switch (from.flags & BCH_VALIDATE_write) { \ case READ: \ mustfix_fsck_err(c, _err, "%s", _buf.buf); \ break; \ @@ -325,11 +328,11 @@ static void journal_entry_err_msg(struct printbuf *out, static int journal_validate_key(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, - unsigned level, enum btree_id btree_id, struct bkey_i *k, - unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from, + unsigned version, int big_endian) { + enum bch_validate_flags flags = from.flags; int write = flags & BCH_VALIDATE_write; void *next = vstruct_next(entry); int ret = 0; @@ -364,11 +367,10 @@ static int journal_validate_key(struct bch_fs *c, } if (!write) - bch2_bkey_compat(level, btree_id, version, big_endian, + bch2_bkey_compat(from.level, from.btree, version, big_endian, write, NULL, bkey_to_packed(k)); - ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), - __btree_node_type(level, btree_id), write); + ret = bch2_bkey_validate(c, bkey_i_to_s_c(k), from); if (ret == -BCH_ERR_fsck_delete_bkey) { le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); memmove(k, bkey_next(k), next - (void *) bkey_next(k)); @@ -379,7 +381,7 @@ static int journal_validate_key(struct bch_fs *c, goto fsck_err; if (write) - bch2_bkey_compat(level, btree_id, version, big_endian, + bch2_bkey_compat(from.level, from.btree, version, big_endian, write, NULL, bkey_to_packed(k)); fsck_err: return ret; @@ -389,16 +391,15 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_i *k = entry->start; + from.level = entry->level; + from.btree = entry->btree_id; + while (k != vstruct_last(entry)) { - int ret = journal_validate_key(c, jset, entry, - entry->level, - entry->btree_id, - k, version, big_endian, - flags|BCH_VALIDATE_journal); + int ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); if (ret == FSCK_DELETED_KEY) continue; else if (ret) @@ -421,7 +422,8 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct 
bch_fs bch2_prt_jset_entry_type(out, entry->type); prt_str(out, ": "); } - prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level); + bch2_btree_id_level_to_text(out, entry->btree_id, entry->level); + prt_char(out, ' '); bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); first = false; } @@ -431,11 +433,15 @@ static int journal_entry_btree_root_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_i *k = entry->start; int ret = 0; + from.root = true; + from.level = entry->level + 1; + from.btree = entry->btree_id; + if (journal_entry_err_on(!entry->u64s || le16_to_cpu(entry->u64s) != k->k.u64s, c, version, jset, entry, @@ -452,8 +458,7 @@ static int journal_entry_btree_root_validate(struct bch_fs *c, return 0; } - ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k, - version, big_endian, flags); + ret = journal_validate_key(c, jset, entry, k, from, version, big_endian); if (ret == FSCK_DELETED_KEY) ret = 0; fsck_err: @@ -470,7 +475,7 @@ static int journal_entry_prio_ptrs_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { /* obsolete, don't care: */ return 0; @@ -485,7 +490,7 @@ static int journal_entry_blacklist_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -512,7 +517,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct jset_entry_blacklist_v2 *bl_entry; int ret = 0; @@ -554,7 +559,7 @@ static int journal_entry_usage_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct jset_entry_usage *u = container_of(entry, struct jset_entry_usage, entry); @@ -588,7 +593,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct jset_entry_data_usage *u = container_of(entry, struct jset_entry_data_usage, entry); @@ -632,7 +637,7 @@ static int journal_entry_clock_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct jset_entry_clock *clock = container_of(entry, struct jset_entry_clock, entry); @@ -665,14 +670,14 @@ static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c, struct jset_entry_clock *clock = container_of(entry, struct jset_entry_clock, entry); - prt_printf(out, "%s=%llu", clock->rw ? 
"write" : "read", le64_to_cpu(clock->time)); + prt_printf(out, "%s=%llu", str_write_read(clock->rw), le64_to_cpu(clock->time)); } static int journal_entry_dev_usage_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct jset_entry_dev_usage *u = container_of(entry, struct jset_entry_dev_usage, entry); @@ -729,7 +734,7 @@ static int journal_entry_log_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return 0; } @@ -738,19 +743,19 @@ static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c, struct jset_entry *entry) { struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry); - unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d); - prt_printf(out, "%.*s", bytes, l->d); + prt_printf(out, "%.*s", jset_entry_log_msg_bytes(l), l->d); } static int journal_entry_overwrite_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { + from.flags = 0; return journal_entry_btree_keys_validate(c, jset, entry, - version, big_endian, READ); + version, big_endian, from); } static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c, @@ -763,10 +768,10 @@ static int journal_entry_write_buffer_keys_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return journal_entry_btree_keys_validate(c, jset, entry, - version, big_endian, READ); + version, big_endian, from); } static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c, @@ -779,7 +784,7 @@ static int journal_entry_datetime_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { unsigned bytes = vstruct_bytes(entry); unsigned expected = 16; @@ -809,7 +814,7 @@ static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs * struct jset_entry_ops { int (*validate)(struct bch_fs *, struct jset *, struct jset_entry *, unsigned, int, - enum bch_validate_flags); + struct bkey_validate_context); void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *); }; @@ -827,11 +832,11 @@ int bch2_journal_entry_validate(struct bch_fs *c, struct jset *jset, struct jset_entry *entry, unsigned version, int big_endian, - enum bch_validate_flags flags) + struct bkey_validate_context from) { return entry->type < BCH_JSET_ENTRY_NR ? 
bch2_jset_entry_ops[entry->type].validate(c, jset, entry, - version, big_endian, flags) + version, big_endian, from) : 0; } @@ -849,10 +854,18 @@ void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c, static int jset_validate_entries(struct bch_fs *c, struct jset *jset, enum bch_validate_flags flags) { + struct bkey_validate_context from = { + .flags = flags, + .from = BKEY_VALIDATE_journal, + .journal_seq = le64_to_cpu(jset->seq), + }; + unsigned version = le32_to_cpu(jset->version); int ret = 0; vstruct_for_each(jset, entry) { + from.journal_offset = (u64 *) entry - jset->_data; + if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset), c, version, jset, entry, journal_entry_past_jset_end, @@ -861,8 +874,8 @@ static int jset_validate_entries(struct bch_fs *c, struct jset *jset, break; } - ret = bch2_journal_entry_validate(c, jset, entry, - version, JSET_BIG_ENDIAN(jset), flags); + ret = bch2_journal_entry_validate(c, jset, entry, version, + JSET_BIG_ENDIAN(jset), from); if (ret) break; } @@ -875,13 +888,17 @@ static int jset_validate(struct bch_fs *c, struct jset *jset, u64 sector, enum bch_validate_flags flags) { - unsigned version; + struct bkey_validate_context from = { + .flags = flags, + .from = BKEY_VALIDATE_journal, + .journal_seq = le64_to_cpu(jset->seq), + }; int ret = 0; if (le64_to_cpu(jset->magic) != jset_magic(c)) return JOURNAL_ENTRY_NONE; - version = le32_to_cpu(jset->version); + unsigned version = le32_to_cpu(jset->version); if (journal_entry_err_on(!bch2_version_compatible(version), c, version, jset, NULL, jset_unsupported_version, @@ -926,15 +943,16 @@ static int jset_validate_early(struct bch_fs *c, unsigned bucket_sectors_left, unsigned sectors_read) { - size_t bytes = vstruct_bytes(jset); - unsigned version; - enum bch_validate_flags flags = BCH_VALIDATE_journal; + struct bkey_validate_context from = { + .from = BKEY_VALIDATE_journal, + .journal_seq = le64_to_cpu(jset->seq), + }; int ret = 0; if (le64_to_cpu(jset->magic) != jset_magic(c)) return JOURNAL_ENTRY_NONE; - version = le32_to_cpu(jset->version); + unsigned version = le32_to_cpu(jset->version); if (journal_entry_err_on(!bch2_version_compatible(version), c, version, jset, NULL, jset_unsupported_version, @@ -947,6 +965,7 @@ static int jset_validate_early(struct bch_fs *c, return -EINVAL; } + size_t bytes = vstruct_bytes(jset); if (bytes > (sectors_read << 9) && sectors_read < bucket_sectors_left) return JOURNAL_ENTRY_REREAD; @@ -1231,8 +1250,6 @@ int bch2_journal_read(struct bch_fs *c, * those entries will be blacklisted: */ genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) { - enum bch_validate_flags flags = BCH_VALIDATE_journal; - i = *_i; if (journal_replay_ignore(i)) @@ -1252,6 +1269,10 @@ int bch2_journal_read(struct bch_fs *c, continue; } + struct bkey_validate_context from = { + .from = BKEY_VALIDATE_journal, + .journal_seq = le64_to_cpu(i->j.seq), + }; if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), c, le32_to_cpu(i->j.version), &i->j, NULL, jset_last_seq_newer_than_seq, @@ -1411,27 +1432,50 @@ fsck_err: /* journal write: */ +static void journal_advance_devs_to_next_bucket(struct journal *j, + struct dev_alloc_list *devs, + unsigned sectors, u64 seq) +{ + struct bch_fs *c = container_of(j, struct bch_fs, journal); + + darray_for_each(*devs, i) { + struct bch_dev *ca = rcu_dereference(c->devs[*i]); + if (!ca) + continue; + + struct journal_device *ja = &ca->journal; + + if (sectors > ja->sectors_free && + sectors <= ca->mi.bucket_size 
&& + bch2_journal_dev_buckets_available(j, ja, + journal_space_discarded)) { + ja->cur_idx = (ja->cur_idx + 1) % ja->nr; + ja->sectors_free = ca->mi.bucket_size; + + /* + * ja->bucket_seq[ja->cur_idx] must always have + * something sensible: + */ + ja->bucket_seq[ja->cur_idx] = le64_to_cpu(seq); + } + } +} + static void __journal_write_alloc(struct journal *j, struct journal_buf *w, - struct dev_alloc_list *devs_sorted, + struct dev_alloc_list *devs, unsigned sectors, unsigned *replicas, unsigned replicas_want) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct journal_device *ja; - struct bch_dev *ca; - unsigned i; - if (*replicas >= replicas_want) - return; - - for (i = 0; i < devs_sorted->nr; i++) { - ca = rcu_dereference(c->devs[devs_sorted->devs[i]]); + darray_for_each(*devs, i) { + struct bch_dev *ca = rcu_dereference(c->devs[*i]); if (!ca) continue; - ja = &ca->journal; + struct journal_device *ja = &ca->journal; /* * Check that we can use this device, and aren't already using @@ -1477,65 +1521,53 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w) { struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_devs_mask devs; - struct journal_device *ja; - struct bch_dev *ca; struct dev_alloc_list devs_sorted; unsigned sectors = vstruct_sectors(w->data, c->block_bits); unsigned target = c->opts.metadata_target ?: c->opts.foreground_target; - unsigned i, replicas = 0, replicas_want = + unsigned replicas = 0, replicas_want = READ_ONCE(c->opts.metadata_replicas); unsigned replicas_need = min_t(unsigned, replicas_want, READ_ONCE(c->opts.metadata_replicas_required)); + bool advance_done = false; rcu_read_lock(); -retry: - devs = target_rw_devs(c, BCH_DATA_journal, target); - devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); + /* We might run more than once if we have to stop and do discards: */ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(&w->key)); + bkey_for_each_ptr(ptrs, p) { + struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->dev); + if (ca) + replicas += ca->mi.durability; + } - __journal_write_alloc(j, w, &devs_sorted, - sectors, &replicas, replicas_want); +retry_target: + devs = target_rw_devs(c, BCH_DATA_journal, target); + devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); +retry_alloc: + __journal_write_alloc(j, w, &devs_sorted, sectors, &replicas, replicas_want); - if (replicas >= replicas_want) + if (likely(replicas >= replicas_want)) goto done; - for (i = 0; i < devs_sorted.nr; i++) { - ca = rcu_dereference(c->devs[devs_sorted.devs[i]]); - if (!ca) - continue; - - ja = &ca->journal; - - if (sectors > ja->sectors_free && - sectors <= ca->mi.bucket_size && - bch2_journal_dev_buckets_available(j, ja, - journal_space_discarded)) { - ja->cur_idx = (ja->cur_idx + 1) % ja->nr; - ja->sectors_free = ca->mi.bucket_size; - - /* - * ja->bucket_seq[ja->cur_idx] must always have - * something sensible: - */ - ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); - } + if (!advance_done) { + journal_advance_devs_to_next_bucket(j, &devs_sorted, sectors, w->data->seq); + advance_done = true; + goto retry_alloc; } - __journal_write_alloc(j, w, &devs_sorted, - sectors, &replicas, replicas_want); - if (replicas < replicas_want && target) { /* Retry from all devices: */ target = 0; - goto retry; + advance_done = false; + goto retry_target; } done: rcu_read_unlock(); BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); - return replicas >= replicas_need ? 0 : -EROFS; + return replicas >= replicas_need ? 
0 : -BCH_ERR_insufficient_journal_devices; } static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) @@ -1732,6 +1764,7 @@ static CLOSURE_CALLBACK(journal_write_submit) bio->bi_iter.bi_sector = ptr->offset; bio->bi_end_io = journal_write_endio; bio->bi_private = ca; + bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0); BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector); ca->prev_journal_sector = bio->bi_iter.bi_sector; @@ -2023,19 +2056,21 @@ CLOSURE_CALLBACK(bch2_journal_write) bch2_journal_do_discards(j); } - if (ret) { + if (ret && !bch2_journal_error(j)) { struct printbuf buf = PRINTBUF; buf.atomic++; - prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu: %s"), + prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"), le64_to_cpu(w->data->seq), + vstruct_sectors(w->data, c->block_bits), bch2_err_str(ret)); __bch2_journal_debug_to_text(&buf, j); spin_unlock(&j->lock); bch2_print_string_as_lines(KERN_ERR, buf.buf); printbuf_exit(&buf); - goto err; } + if (ret) + goto err; /* * write is allocated, no longer need to account for it in diff --git a/fs/bcachefs/journal_io.h b/fs/bcachefs/journal_io.h index 2ca9cde30ea8..12b39fcb4424 100644 --- a/fs/bcachefs/journal_io.h +++ b/fs/bcachefs/journal_io.h @@ -63,7 +63,7 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset, int bch2_journal_entry_validate(struct bch_fs *, struct jset *, struct jset_entry *, unsigned, int, - enum bch_validate_flags); + struct bkey_validate_context); void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *, struct jset_entry *); diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c index ace291f175dd..d373cd181a7f 100644 --- a/fs/bcachefs/journal_reclaim.c +++ b/fs/bcachefs/journal_reclaim.c @@ -38,6 +38,9 @@ unsigned bch2_journal_dev_buckets_available(struct journal *j, struct journal_device *ja, enum journal_space_from from) { + if (!ja->nr) + return 0; + unsigned available = (journal_space_from(ja, from) - ja->cur_idx - 1 + ja->nr) % ja->nr; @@ -137,14 +140,18 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne struct bch_fs *c = container_of(j, struct bch_fs, journal); unsigned pos, nr_devs = 0; struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX]; + unsigned min_bucket_size = U32_MAX; BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space)); rcu_read_lock(); for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) { - if (!ca->journal.nr) + if (!ca->journal.nr || + !ca->mi.durability) continue; + min_bucket_size = min(min_bucket_size, ca->mi.bucket_size); + space = journal_dev_space_available(j, ca, from); if (!space.next_entry) continue; @@ -164,7 +171,9 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne * We sorted largest to smallest, and we want the smallest out of the * @nr_devs_want largest devices: */ - return dev_space[nr_devs_want - 1]; + space = dev_space[nr_devs_want - 1]; + space.next_entry = min(space.next_entry, min_bucket_size); + return space; } void bch2_journal_space_available(struct journal *j) @@ -318,8 +327,10 @@ void bch2_journal_reclaim_fast(struct journal *j) popped = true; } - if (popped) + if (popped) { bch2_journal_space_available(j); + __closure_wake_up(&j->reclaim_flush_wait); + } } bool __bch2_journal_pin_put(struct journal *j, u64 seq) @@ -353,6 +364,9 @@ static inline bool __journal_pin_drop(struct journal *j, pin->seq = 0; list_del_init(&pin->list); + if 
(j->reclaim_flush_wait.list.first) + __closure_wake_up(&j->reclaim_flush_wait); + /* * Unpinning a journal entry may make journal_next_bucket() succeed, if * writing a new last_seq will now make another bucket available: @@ -370,15 +384,19 @@ void bch2_journal_pin_drop(struct journal *j, spin_unlock(&j->lock); } -static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn) +static enum journal_pin_type journal_pin_type(struct journal_entry_pin *pin, + journal_pin_flush_fn fn) { if (fn == bch2_btree_node_flush0 || - fn == bch2_btree_node_flush1) - return JOURNAL_PIN_btree; - else if (fn == bch2_btree_key_cache_journal_flush) - return JOURNAL_PIN_key_cache; + fn == bch2_btree_node_flush1) { + unsigned idx = fn == bch2_btree_node_flush1; + struct btree *b = container_of(pin, struct btree, writes[idx].journal); + + return JOURNAL_PIN_TYPE_btree0 - b->c.level; + } else if (fn == bch2_btree_key_cache_journal_flush) + return JOURNAL_PIN_TYPE_key_cache; else - return JOURNAL_PIN_other; + return JOURNAL_PIN_TYPE_other; } static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq, @@ -397,7 +415,12 @@ static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq, atomic_inc(&pin_list->count); pin->seq = seq; pin->flush = flush_fn; - list_add(&pin->list, &pin_list->list[type]); + + if (list_empty(&pin_list->unflushed[type]) && + j->reclaim_flush_wait.list.first) + __closure_wake_up(&j->reclaim_flush_wait); + + list_add(&pin->list, &pin_list->unflushed[type]); } void bch2_journal_pin_copy(struct journal *j, @@ -422,7 +445,7 @@ void bch2_journal_pin_copy(struct journal *j, bool reclaim = __journal_pin_drop(j, dst); - bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn)); + bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(dst, flush_fn)); if (reclaim) bch2_journal_reclaim_fast(j); @@ -446,7 +469,7 @@ void bch2_journal_pin_set(struct journal *j, u64 seq, bool reclaim = __journal_pin_drop(j, pin); - bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn)); + bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(pin, flush_fn)); if (reclaim) bch2_journal_reclaim_fast(j); @@ -490,16 +513,15 @@ journal_get_next_pin(struct journal *j, { struct journal_entry_pin_list *pin_list; struct journal_entry_pin *ret = NULL; - unsigned i; fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) { if (*seq > seq_to_flush && !allowed_above_seq) break; - for (i = 0; i < JOURNAL_PIN_NR; i++) - if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) || - ((1U << i) & allowed_above_seq)) { - ret = list_first_entry_or_null(&pin_list->list[i], + for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++) + if (((BIT(i) & allowed_below_seq) && *seq <= seq_to_flush) || + (BIT(i) & allowed_above_seq)) { + ret = list_first_entry_or_null(&pin_list->unflushed[i], struct journal_entry_pin, list); if (ret) return ret; @@ -535,8 +557,8 @@ static size_t journal_flush_pins(struct journal *j, } if (min_key_cache) { - allowed_above |= 1U << JOURNAL_PIN_key_cache; - allowed_below |= 1U << JOURNAL_PIN_key_cache; + allowed_above |= BIT(JOURNAL_PIN_TYPE_key_cache); + allowed_below |= BIT(JOURNAL_PIN_TYPE_key_cache); } cond_resched(); @@ -544,7 +566,9 @@ static size_t journal_flush_pins(struct journal *j, j->last_flushed = jiffies; spin_lock(&j->lock); - pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq); + pin = journal_get_next_pin(j, seq_to_flush, + allowed_below, + allowed_above, &seq); if (pin) { 
BUG_ON(j->flush_in_progress); j->flush_in_progress = pin; @@ -567,7 +591,7 @@ static size_t journal_flush_pins(struct journal *j, spin_lock(&j->lock); /* Pin might have been dropped or rearmed: */ if (likely(!err && !j->flush_in_progress_dropped)) - list_move(&pin->list, &journal_seq_pin(j, seq)->flushed); + list_move(&pin->list, &journal_seq_pin(j, seq)->flushed[journal_pin_type(pin, flush_fn)]); j->flush_in_progress = NULL; j->flush_in_progress_dropped = false; spin_unlock(&j->lock); @@ -758,10 +782,12 @@ static int bch2_journal_reclaim_thread(void *arg) journal_empty = fifo_empty(&j->pin); spin_unlock(&j->lock); + long timeout = j->next_reclaim - jiffies; + if (journal_empty) schedule(); - else if (time_after(j->next_reclaim, jiffies)) - schedule_timeout(j->next_reclaim - jiffies); + else if (timeout > 0) + schedule_timeout(timeout); else break; } @@ -805,10 +831,41 @@ int bch2_journal_reclaim_start(struct journal *j) return 0; } +static bool journal_pins_still_flushing(struct journal *j, u64 seq_to_flush, + unsigned types) +{ + struct journal_entry_pin_list *pin_list; + u64 seq; + + spin_lock(&j->lock); + fifo_for_each_entry_ptr(pin_list, &j->pin, seq) { + if (seq > seq_to_flush) + break; + + for (unsigned i = 0; i < JOURNAL_PIN_TYPE_NR; i++) + if ((BIT(i) & types) && + (!list_empty(&pin_list->unflushed[i]) || + !list_empty(&pin_list->flushed[i]))) { + spin_unlock(&j->lock); + return true; + } + } + spin_unlock(&j->lock); + + return false; +} + +static bool journal_flush_pins_or_still_flushing(struct journal *j, u64 seq_to_flush, + unsigned types) +{ + return journal_flush_pins(j, seq_to_flush, types, 0, 0, 0) || + journal_pins_still_flushing(j, seq_to_flush, types); +} + static int journal_flush_done(struct journal *j, u64 seq_to_flush, bool *did_work) { - int ret; + int ret = 0; ret = bch2_journal_error(j); if (ret) @@ -816,12 +873,13 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush, mutex_lock(&j->reclaim_lock); - if (journal_flush_pins(j, seq_to_flush, - (1U << JOURNAL_PIN_key_cache)| - (1U << JOURNAL_PIN_other), 0, 0, 0) || - journal_flush_pins(j, seq_to_flush, - (1U << JOURNAL_PIN_btree), 0, 0, 0)) - *did_work = true; + for (int type = JOURNAL_PIN_TYPE_NR - 1; + type >= 0; + --type) + if (journal_flush_pins_or_still_flushing(j, seq_to_flush, BIT(type))) { + *did_work = true; + goto unlock; + } if (seq_to_flush > journal_cur_seq(j)) bch2_journal_entry_close(j); @@ -836,6 +894,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush, !fifo_used(&j->pin); spin_unlock(&j->lock); +unlock: mutex_unlock(&j->reclaim_lock); return ret; @@ -849,7 +908,7 @@ bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush) if (!test_bit(JOURNAL_running, &j->flags)) return false; - closure_wait_event(&j->async_wait, + closure_wait_event(&j->reclaim_flush_wait, journal_flush_done(j, seq_to_flush, &did_work)); return did_work; @@ -915,3 +974,54 @@ err: return ret; } + +bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq) +{ + struct journal_entry_pin_list *pin_list; + struct journal_entry_pin *pin; + + spin_lock(&j->lock); + if (!test_bit(JOURNAL_running, &j->flags)) { + spin_unlock(&j->lock); + return true; + } + + *seq = max(*seq, j->pin.front); + + if (*seq >= j->pin.back) { + spin_unlock(&j->lock); + return true; + } + + out->atomic++; + + pin_list = journal_seq_pin(j, *seq); + + prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count)); + printbuf_indent_add(out, 2); + + prt_printf(out, "unflushed:\n"); + for 
(unsigned i = 0; i < ARRAY_SIZE(pin_list->unflushed); i++) + list_for_each_entry(pin, &pin_list->unflushed[i], list) + prt_printf(out, "\t%px %ps\n", pin, pin->flush); + + prt_printf(out, "flushed:\n"); + for (unsigned i = 0; i < ARRAY_SIZE(pin_list->flushed); i++) + list_for_each_entry(pin, &pin_list->flushed[i], list) + prt_printf(out, "\t%px %ps\n", pin, pin->flush); + + printbuf_indent_sub(out, 2); + + --out->atomic; + spin_unlock(&j->lock); + + return false; +} + +void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j) +{ + u64 seq = 0; + + while (!bch2_journal_seq_pins_to_text(out, j, &seq)) + seq++; +} diff --git a/fs/bcachefs/journal_reclaim.h b/fs/bcachefs/journal_reclaim.h index ec84c3345281..0a73d7134e1c 100644 --- a/fs/bcachefs/journal_reclaim.h +++ b/fs/bcachefs/journal_reclaim.h @@ -78,4 +78,7 @@ static inline bool bch2_journal_flush_all_pins(struct journal *j) int bch2_journal_flush_device_pins(struct journal *, int); +void bch2_journal_pins_to_text(struct printbuf *, struct journal *); +bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *); + #endif /* _BCACHEFS_JOURNAL_RECLAIM_H */ diff --git a/fs/bcachefs/journal_types.h b/fs/bcachefs/journal_types.h index 19183fcf7ad7..1ef3a28ed6ab 100644 --- a/fs/bcachefs/journal_types.h +++ b/fs/bcachefs/journal_types.h @@ -9,6 +9,9 @@ #include "super_types.h" #include "fifo.h" +/* btree write buffer steals 8 bits for its own purposes: */ +#define JOURNAL_SEQ_MAX ((1ULL << 56) - 1) + #define JOURNAL_BUF_BITS 2 #define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS) #define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1) @@ -50,15 +53,18 @@ struct journal_buf { */ enum journal_pin_type { - JOURNAL_PIN_btree, - JOURNAL_PIN_key_cache, - JOURNAL_PIN_other, - JOURNAL_PIN_NR, + JOURNAL_PIN_TYPE_btree3, + JOURNAL_PIN_TYPE_btree2, + JOURNAL_PIN_TYPE_btree1, + JOURNAL_PIN_TYPE_btree0, + JOURNAL_PIN_TYPE_key_cache, + JOURNAL_PIN_TYPE_other, + JOURNAL_PIN_TYPE_NR, }; struct journal_entry_pin_list { - struct list_head list[JOURNAL_PIN_NR]; - struct list_head flushed; + struct list_head unflushed[JOURNAL_PIN_TYPE_NR]; + struct list_head flushed[JOURNAL_PIN_TYPE_NR]; atomic_t count; struct bch_devs_list devs; }; @@ -112,6 +118,7 @@ union journal_res_state { */ #define JOURNAL_ENTRY_OFFSET_MAX ((1U << 20) - 1) +#define JOURNAL_ENTRY_BLOCKED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 2) #define JOURNAL_ENTRY_CLOSED_VAL (JOURNAL_ENTRY_OFFSET_MAX - 1) #define JOURNAL_ENTRY_ERROR_VAL (JOURNAL_ENTRY_OFFSET_MAX) @@ -193,6 +200,7 @@ struct journal { * insufficient devices: */ enum journal_errors cur_entry_error; + unsigned cur_entry_offset_if_blocked; unsigned buf_size_want; /* @@ -221,6 +229,7 @@ struct journal { /* Used when waiting because the journal was full */ wait_queue_head_t wait; struct closure_waitlist async_wait; + struct closure_waitlist reclaim_flush_wait; struct delayed_work write_work; struct workqueue_struct *wq; @@ -231,6 +240,7 @@ struct journal { /* seq, last_seq from the most recent journal entry successfully written */ u64 seq_ondisk; u64 flushed_seq_ondisk; + u64 flushing_seq; u64 last_seq_ondisk; u64 err_seq; u64 last_empty_seq; diff --git a/fs/bcachefs/logged_ops.c b/fs/bcachefs/logged_ops.c index 60e00702d1a4..75f27ec26f85 100644 --- a/fs/bcachefs/logged_ops.c +++ b/fs/bcachefs/logged_ops.c @@ -63,8 +63,10 @@ fsck_err: int bch2_resume_logged_ops(struct bch_fs *c) { int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, - BTREE_ID_logged_ops, POS_MIN, + for_each_btree_key_max(trans, iter, + BTREE_ID_logged_ops, + 
POS(LOGGED_OPS_INUM_logged_ops, 0), + POS(LOGGED_OPS_INUM_logged_ops, U64_MAX), BTREE_ITER_prefetch, k, resume_logged_op(trans, &iter, k))); bch_err_fn(c, ret); @@ -74,9 +76,8 @@ int bch2_resume_logged_ops(struct bch_fs *c) static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k) { struct btree_iter iter; - int ret; - - ret = bch2_bkey_get_empty_slot(trans, &iter, BTREE_ID_logged_ops, POS_MAX); + int ret = bch2_bkey_get_empty_slot(trans, &iter, + BTREE_ID_logged_ops, POS(LOGGED_OPS_INUM_logged_ops, U64_MAX)); if (ret) return ret; diff --git a/fs/bcachefs/logged_ops_format.h b/fs/bcachefs/logged_ops_format.h index 6a4bf7129dba..cfb67c95d4c8 100644 --- a/fs/bcachefs/logged_ops_format.h +++ b/fs/bcachefs/logged_ops_format.h @@ -2,6 +2,11 @@ #ifndef _BCACHEFS_LOGGED_OPS_FORMAT_H #define _BCACHEFS_LOGGED_OPS_FORMAT_H +enum logged_ops_inums { + LOGGED_OPS_INUM_logged_ops, + LOGGED_OPS_INUM_inode_cursors, +}; + struct bch_logged_op_truncate { struct bch_val v; __le32 subvol; diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c index 10857eccdeaf..ce794d55818f 100644 --- a/fs/bcachefs/lru.c +++ b/fs/bcachefs/lru.c @@ -12,7 +12,7 @@ /* KEY_TYPE_lru is obsolete: */ int bch2_lru_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -192,7 +192,7 @@ int bch2_check_lrus(struct bch_fs *c) int ret = bch2_trans_run(c, for_each_btree_key_commit(trans, iter, BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k, - NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, bch2_check_lru_key(trans, &iter, k, &last_flushed))); bch2_bkey_buf_exit(&last_flushed, c); diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h index e6a7d8241bb8..f31a6cf1514c 100644 --- a/fs/bcachefs/lru.h +++ b/fs/bcachefs/lru.h @@ -33,7 +33,7 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l) return BCH_LRU_read; } -int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context); void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); void bch2_lru_pos_to_text(struct printbuf *, struct bpos); diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 0ef4a86850bb..160b4374160a 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -21,6 +21,8 @@ #include "journal_reclaim.h" #include "keylist.h" #include "move.h" +#include "rebalance.h" +#include "reflink.h" #include "replicas.h" #include "snapshot.h" #include "super-io.h" @@ -196,6 +198,13 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt) list_del(&ctxt->list); mutex_unlock(&c->moving_context_lock); + /* + * Generally, releasing a transaction within a transaction restart means + * an unhandled transaction restart: but this can happen legitimately + * within the move code, e.g. 
when bch2_move_ratelimit() tells us to + * exit before we've retried + */ + bch2_trans_begin(ctxt->trans); bch2_trans_put(ctxt->trans); memset(ctxt, 0, sizeof(*ctxt)); } @@ -292,8 +301,8 @@ int bch2_move_extent(struct moving_context *ctxt, io->write_sectors = k.k->size; bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0); - bio_set_prio(&io->write.op.wbio.bio, - IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); + io->write.op.wbio.bio.bi_ioprio = + IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0); if (bch2_bio_alloc_pages(&io->write.op.wbio.bio, sectors << 9, GFP_KERNEL)) @@ -303,7 +312,7 @@ int bch2_move_extent(struct moving_context *ctxt, io->rbio.opts = io_opts; bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0); io->rbio.bio.bi_vcnt = pages; - bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); + io->rbio.bio.bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0); io->rbio.bio.bi_iter.bi_size = sectors << 9; io->rbio.bio.bi_opf = REQ_OP_READ; @@ -379,34 +388,42 @@ err: return ret; } -struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, +static struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, struct per_snapshot_io_opts *io_opts, + struct bpos extent_pos, /* extent_iter, extent_k may be in reflink btree */ + struct btree_iter *extent_iter, struct bkey_s_c extent_k) { struct bch_fs *c = trans->c; u32 restart_count = trans->restart_count; + struct bch_io_opts *opts_ret = &io_opts->fs_io_opts; int ret = 0; - if (io_opts->cur_inum != extent_k.k->p.inode) { + if (extent_k.k->type == KEY_TYPE_reflink_v) + goto out; + + if (io_opts->cur_inum != extent_pos.inode) { io_opts->d.nr = 0; - ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode), + ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_pos.inode), BTREE_ITER_all_snapshots, k, ({ - if (k.k->p.offset != extent_k.k->p.inode) + if (k.k->p.offset != extent_pos.inode) break; if (!bkey_is_inode(k.k)) continue; struct bch_inode_unpacked inode; - BUG_ON(bch2_inode_unpack(k, &inode)); + _ret3 = bch2_inode_unpack(k, &inode); + if (_ret3) + break; struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot }; bch2_inode_opts_get(&e.io_opts, trans->c, &inode); darray_push(&io_opts->d, e); })); - io_opts->cur_inum = extent_k.k->p.inode; + io_opts->cur_inum = extent_pos.inode; } ret = ret ?: trans_was_restarted(trans, restart_count); @@ -415,43 +432,46 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans, if (extent_k.k->p.snapshot) darray_for_each(io_opts->d, i) - if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) - return &i->io_opts; - - return &io_opts->fs_io_opts; + if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot)) { + opts_ret = &i->io_opts; + break; + } +out: + ret = bch2_get_update_rebalance_opts(trans, opts_ret, extent_iter, extent_k); + if (ret) + return ERR_PTR(ret); + return opts_ret; } int bch2_move_get_io_opts_one(struct btree_trans *trans, struct bch_io_opts *io_opts, + struct btree_iter *extent_iter, struct bkey_s_c extent_k) { - struct btree_iter iter; - struct bkey_s_c k; - int ret; + struct bch_fs *c = trans->c; + + *io_opts = bch2_opts_to_inode_opts(c->opts); /* reflink btree? 
*/ - if (!extent_k.k->p.inode) { - *io_opts = bch2_opts_to_inode_opts(trans->c->opts); - return 0; - } + if (!extent_k.k->p.inode) + goto out; - k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + struct btree_iter inode_iter; + struct bkey_s_c inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot), BTREE_ITER_cached); - ret = bkey_err(k); + int ret = bkey_err(inode_k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) return ret; - if (!ret && bkey_is_inode(k.k)) { + if (!ret && bkey_is_inode(inode_k.k)) { struct bch_inode_unpacked inode; - bch2_inode_unpack(k, &inode); - bch2_inode_opts_get(io_opts, trans->c, &inode); - } else { - *io_opts = bch2_opts_to_inode_opts(trans->c->opts); + bch2_inode_unpack(inode_k, &inode); + bch2_inode_opts_get(io_opts, c, &inode); } - - bch2_trans_iter_exit(trans, &iter); - return 0; + bch2_trans_iter_exit(trans, &inode_iter); +out: + return bch2_get_update_rebalance_opts(trans, io_opts, extent_iter, extent_k); } int bch2_move_ratelimit(struct moving_context *ctxt) @@ -509,9 +529,15 @@ static int bch2_move_data_btree(struct moving_context *ctxt, struct per_snapshot_io_opts snapshot_io_opts; struct bch_io_opts *io_opts; struct bkey_buf sk; - struct btree_iter iter; + struct btree_iter iter, reflink_iter = {}; struct bkey_s_c k; struct data_update_opts data_opts; + /* + * If we're moving a single file, also process reflinked data it points + * to (this includes propagating changed io_opts from the inode to the + * extent): + */ + bool walk_indirect = start.inode == end.inode; int ret = 0, ret2; per_snapshot_io_opts_init(&snapshot_io_opts, c); @@ -531,6 +557,8 @@ static int bch2_move_data_btree(struct moving_context *ctxt, bch2_ratelimit_reset(ctxt->rate); while (!bch2_move_ratelimit(ctxt)) { + struct btree_iter *extent_iter = &iter; + bch2_trans_begin(trans); k = bch2_btree_iter_peek(&iter); @@ -549,10 +577,36 @@ static int bch2_move_data_btree(struct moving_context *ctxt, if (ctxt->stats) ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos); + if (walk_indirect && + k.k->type == KEY_TYPE_reflink_p && + REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) { + struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); + s64 offset_into_extent = iter.pos.offset - bkey_start_offset(k.k); + + bch2_trans_iter_exit(trans, &reflink_iter); + k = bch2_lookup_indirect_extent(trans, &reflink_iter, &offset_into_extent, p, true, 0); + ret = bkey_err(k); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + continue; + if (ret) + break; + + if (bkey_deleted(k.k)) + goto next_nondata; + + /* + * XXX: reflink pointers may point to multiple indirect + * extents, so don't advance past the entire reflink + * pointer - need to fixup iter->k + */ + extent_iter = &reflink_iter; + } + if (!bkey_extent_is_direct_data(k.k)) goto next_nondata; - io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k); + io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, + iter.pos, extent_iter, k); ret = PTR_ERR_OR_ZERO(io_opts); if (ret) continue; @@ -568,7 +622,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt, bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts); + ret2 = bch2_move_extent(ctxt, NULL, extent_iter, k, *io_opts, data_opts); if (ret2) { if (bch2_err_matches(ret2, BCH_ERR_transaction_restart)) continue; @@ -589,6 +643,7 @@ next_nondata: bch2_btree_iter_advance(&iter); } + bch2_trans_iter_exit(trans, 
&reflink_iter); bch2_trans_iter_exit(trans, &iter); bch2_bkey_buf_exit(&sk, c); per_snapshot_io_opts_exit(&snapshot_io_opts); @@ -654,16 +709,12 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, struct bch_fs *c = trans->c; bool is_kthread = current->flags & PF_KTHREAD; struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); - struct btree_iter iter; + struct btree_iter iter = {}, bp_iter = {}; struct bkey_buf sk; - struct bch_backpointer bp; - struct bch_alloc_v4 a_convert; - const struct bch_alloc_v4 *a; struct bkey_s_c k; struct data_update_opts data_opts; - unsigned dirty_sectors, bucket_size; - u64 fragmentation; - struct bpos bp_pos = POS_MIN; + unsigned sectors_moved = 0; + struct bkey_buf last_flushed; int ret = 0; struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode); @@ -672,6 +723,8 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, trace_bucket_evacuate(c, &bucket); + bch2_bkey_buf_init(&last_flushed); + bkey_init(&last_flushed.k->k); bch2_bkey_buf_init(&sk); /* @@ -679,21 +732,13 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, */ bch2_trans_begin(trans); - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - bucket, BTREE_ITER_cached); - ret = lockrestart_do(trans, - bkey_err(k = bch2_btree_iter_peek_slot(&iter))); - bch2_trans_iter_exit(trans, &iter); + bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, + bucket_pos_to_bp_start(ca, bucket), 0); bch_err_msg(c, ret, "looking up alloc key"); if (ret) goto err; - a = bch2_alloc_to_v4(k, &a_convert); - dirty_sectors = bch2_bucket_sectors_dirty(*a); - bucket_size = ca->mi.bucket_size; - fragmentation = alloc_lru_idx_fragmentation(*a, ca); - ret = bch2_btree_write_buffer_tryflush(trans); bch_err_msg(c, ret, "flushing btree write buffer"); if (ret) @@ -705,18 +750,23 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, bch2_trans_begin(trans); - ret = bch2_get_next_backpointer(trans, ca, bucket, gen, - &bp_pos, &bp, - BTREE_ITER_cached); + k = bch2_btree_iter_peek(&bp_iter); + ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) goto err; - if (bkey_eq(bp_pos, POS_MAX)) + + if (!k.k || bkey_gt(k.k->p, bucket_pos_to_bp_end(ca, bucket))) break; - if (!bp.level) { - k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0); + if (k.k->type != KEY_TYPE_backpointer) + goto next; + + struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); + + if (!bp.v->level) { + k = bch2_backpointer_get_key(trans, bp, &iter, 0, &last_flushed); ret = bkey_err(k); if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; @@ -728,7 +778,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); - ret = bch2_move_get_io_opts_one(trans, &io_opts, k); + ret = bch2_move_get_io_opts_one(trans, &io_opts, &iter, k); if (ret) { bch2_trans_iter_exit(trans, &iter); continue; @@ -738,14 +788,18 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, data_opts.target = io_opts.background_target; data_opts.rewrite_ptrs = 0; + unsigned sectors = bp.v->bucket_len; /* move_extent will drop locks */ unsigned i = 0; - bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) { - if (ptr->dev == bucket.inode) { - data_opts.rewrite_ptrs |= 1U << i; - if (ptr->cached) { + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + bkey_for_each_ptr_decode(k.k, bch2_bkey_ptrs_c(k), p, entry) { + if (p.ptr.dev == bucket.inode) { + if (p.ptr.cached) { bch2_trans_iter_exit(trans, &iter); goto next; } + data_opts.rewrite_ptrs |= 
1U << i; + break; } i++; } @@ -765,14 +819,15 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, goto err; if (ctxt->stats) - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); + atomic64_add(sectors, &ctxt->stats->sectors_seen); + sectors_moved += sectors; } else { struct btree *b; - b = bch2_backpointer_get_node(trans, &iter, bp_pos, bp); + b = bch2_backpointer_get_node(trans, bp, &iter, &last_flushed); ret = PTR_ERR_OR_ZERO(b); if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) - continue; + goto next; if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) continue; if (ret) @@ -796,15 +851,18 @@ int bch2_evacuate_bucket(struct moving_context *ctxt, atomic64_add(sectors, &ctxt->stats->sectors_seen); atomic64_add(sectors, &ctxt->stats->sectors_moved); } + sectors_moved += btree_sectors(c); } next: - bp_pos = bpos_nosnap_successor(bp_pos); + bch2_btree_iter_advance(&bp_iter); } - trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, fragmentation, ret); + trace_evacuate_bucket(c, &bucket, sectors_moved, ca->mi.bucket_size, ret); err: + bch2_trans_iter_exit(trans, &bp_iter); bch2_dev_put(ca); bch2_bkey_buf_exit(&sk, c); + bch2_bkey_buf_exit(&last_flushed, c); return ret; } diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h index 9baf3093a678..51e0505a8156 100644 --- a/fs/bcachefs/move.h +++ b/fs/bcachefs/move.h @@ -110,9 +110,8 @@ static inline void per_snapshot_io_opts_exit(struct per_snapshot_io_opts *io_opt darray_exit(&io_opts->d); } -struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *, - struct per_snapshot_io_opts *, struct bkey_s_c); -int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, struct bkey_s_c); +int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, + struct btree_iter *, struct bkey_s_c); int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *); diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c index d658be90f737..21805509ab9e 100644 --- a/fs/bcachefs/movinggc.c +++ b/fs/bcachefs/movinggc.c @@ -167,7 +167,7 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt, bch2_trans_begin(trans); - ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru, + ret = for_each_btree_key_max(trans, iter, BTREE_ID_lru, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0), lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX), 0, k, ({ @@ -215,7 +215,8 @@ static int bch2_copygc(struct moving_context *ctxt, }; move_buckets buckets = { 0 }; struct move_bucket_in_flight *f; - u64 moved = atomic64_read(&ctxt->stats->sectors_moved); + u64 sectors_seen = atomic64_read(&ctxt->stats->sectors_seen); + u64 sectors_moved = atomic64_read(&ctxt->stats->sectors_moved); int ret = 0; ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets); @@ -245,7 +246,6 @@ static int bch2_copygc(struct moving_context *ctxt, *did_work = true; } err: - darray_exit(&buckets); /* no entries in LRU btree found, or got to end: */ if (bch2_err_matches(ret, ENOENT)) @@ -254,8 +254,11 @@ err: if (ret < 0 && !bch2_err_matches(ret, EROFS)) bch_err_msg(c, ret, "from bch2_move_data()"); - moved = atomic64_read(&ctxt->stats->sectors_moved) - moved; - trace_and_count(c, copygc, c, moved, 0, 0, 0); + sectors_seen = atomic64_read(&ctxt->stats->sectors_seen) - sectors_seen; + sectors_moved = atomic64_read(&ctxt->stats->sectors_moved) - sectors_moved; + trace_and_count(c, copygc, c, buckets.nr, sectors_seen, sectors_moved); + + darray_exit(&buckets); return ret; } @@ -350,9 +353,9 @@ static int bch2_copygc_thread(void 
*arg) bch2_trans_unlock_long(ctxt.trans); cond_resched(); - if (!c->copy_gc_enabled) { + if (!c->opts.copygc_enabled) { move_buckets_wait(&ctxt, buckets, true); - kthread_wait_freezable(c->copy_gc_enabled || + kthread_wait_freezable(c->opts.copygc_enabled || kthread_should_stop()); } diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c index 0e2ee262fbd4..6772faf385a5 100644 --- a/fs/bcachefs/opts.c +++ b/fs/bcachefs/opts.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> +#include <linux/fs_parser.h> #include "bcachefs.h" #include "compress.h" @@ -48,12 +49,12 @@ static const char * const __bch2_csum_types[] = { NULL }; -const char * const bch2_csum_opts[] = { +const char * const __bch2_csum_opts[] = { BCH_CSUM_OPTS() NULL }; -static const char * const __bch2_compression_types[] = { +const char * const __bch2_compression_types[] = { BCH_COMPRESSION_TYPES() NULL }; @@ -113,6 +114,7 @@ void bch2_prt_##name(struct printbuf *out, type t) \ PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type, enum bch_jset_entry_type); PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type, enum bch_fs_usage_type); PRT_STR_OPT_BOUNDSCHECKED(data_type, enum bch_data_type); +PRT_STR_OPT_BOUNDSCHECKED(csum_opt, enum bch_csum_opt); PRT_STR_OPT_BOUNDSCHECKED(csum_type, enum bch_csum_type); PRT_STR_OPT_BOUNDSCHECKED(compression_type, enum bch_compression_type); PRT_STR_OPT_BOUNDSCHECKED(str_hash_type, enum bch_str_hash_type); @@ -333,17 +335,18 @@ int bch2_opt_parse(struct bch_fs *c, switch (opt->type) { case BCH_OPT_BOOL: if (val) { - ret = kstrtou64(val, 10, res); + ret = lookup_constant(bool_names, val, -BCH_ERR_option_not_bool); + if (ret != -BCH_ERR_option_not_bool) { + *res = ret; + } else { + if (err) + prt_printf(err, "%s: must be bool", opt->attr.name); + return ret; + } } else { - ret = 0; *res = 1; } - if (ret < 0 || (*res != 0 && *res != 1)) { - if (err) - prt_printf(err, "%s: must be bool", opt->attr.name); - return ret < 0 ? 
ret : -BCH_ERR_option_not_bool; - } break; case BCH_OPT_UINT: if (!val) { @@ -710,11 +713,14 @@ void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca, struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src) { - return (struct bch_io_opts) { + struct bch_io_opts opts = { #define x(_name, _bits) ._name = src._name, BCH_INODE_OPTS() #undef x }; + + bch2_io_opts_fixups(&opts); + return opts; } bool bch2_opt_is_inode_opt(enum bch_opt_id id) diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index 23dda014e331..9d397fc2a1f0 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -16,7 +16,8 @@ extern const char * const bch2_version_upgrade_opts[]; extern const char * const bch2_sb_features[]; extern const char * const bch2_sb_compat[]; extern const char * const __bch2_btree_ids[]; -extern const char * const bch2_csum_opts[]; +extern const char * const __bch2_csum_opts[]; +extern const char * const __bch2_compression_types[]; extern const char * const bch2_compression_opts[]; extern const char * const __bch2_str_hash_types[]; extern const char * const bch2_str_hash_opts[]; @@ -27,6 +28,7 @@ extern const char * const bch2_d_types[]; void bch2_prt_jset_entry_type(struct printbuf *, enum bch_jset_entry_type); void bch2_prt_fs_usage_type(struct printbuf *, enum bch_fs_usage_type); void bch2_prt_data_type(struct printbuf *, enum bch_data_type); +void bch2_prt_csum_opt(struct printbuf *, enum bch_csum_opt); void bch2_prt_csum_type(struct printbuf *, enum bch_csum_type); void bch2_prt_compression_type(struct printbuf *, enum bch_compression_type); void bch2_prt_str_hash_type(struct printbuf *, enum bch_str_hash_type); @@ -171,12 +173,12 @@ enum fsck_err_opts { "size", "Maximum size of checksummed/compressed extents")\ x(metadata_checksum, u8, \ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ - OPT_STR(bch2_csum_opts), \ + OPT_STR(__bch2_csum_opts), \ BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \ NULL, NULL) \ x(data_checksum, u8, \ OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ - OPT_STR(bch2_csum_opts), \ + OPT_STR(__bch2_csum_opts), \ BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \ NULL, NULL) \ x(compression, u8, \ @@ -220,14 +222,14 @@ enum fsck_err_opts { BCH_SB_ERASURE_CODE, false, \ NULL, "Enable erasure coding (DO NOT USE YET)") \ x(inodes_32bit, u8, \ - OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ + OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ OPT_BOOL(), \ BCH_SB_INODE_32BIT, true, \ NULL, "Constrain inode numbers to 32 bits") \ - x(shard_inode_numbers, u8, \ - OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \ - OPT_BOOL(), \ - BCH_SB_SHARD_INUMS, true, \ + x(shard_inode_numbers_bits, u8, \ + OPT_FS|OPT_FORMAT, \ + OPT_UINT(0, 8), \ + BCH_SB_SHARD_INUMS_NBITS, 0, \ NULL, "Shard new inode numbers by CPU id") \ x(inodes_use_key_cache, u8, \ OPT_FS|OPT_FORMAT|OPT_MOUNT, \ @@ -473,6 +475,18 @@ enum fsck_err_opts { BCH2_NO_SB_OPT, true, \ NULL, "Enable nocow mode: enables runtime locking in\n"\ "data move path needed if nocow will ever be in use\n")\ + x(copygc_enabled, u8, \ + OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + BCH2_NO_SB_OPT, true, \ + NULL, "Enable copygc: disable for debugging, or to\n"\ + "quiet the system when doing performance testing\n")\ + x(rebalance_enabled, u8, \ + OPT_FS|OPT_MOUNT|OPT_RUNTIME, \ + OPT_BOOL(), \ + BCH2_NO_SB_OPT, true, \ + NULL, "Enable rebalance: disable for debugging, or to\n"\ + "quiet the system when doing performance testing\n")\ x(no_data_io, u8, \ OPT_MOUNT, \ OPT_BOOL(), \ @@ -488,7 +502,7 @@ enum fsck_err_opts { OPT_DEVICE, \ 
OPT_UINT(0, S64_MAX), \ BCH2_NO_SB_OPT, 0, \ - "size", "Size of filesystem on device") \ + "size", "Specifies the bucket size; must be greater than the btree node size")\ x(durability, u8, \ OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \ OPT_UINT(0, BCH_REPLICAS_MAX), \ @@ -624,11 +638,22 @@ struct bch_io_opts { #define x(_name, _bits) u##_bits _name; BCH_INODE_OPTS() #undef x +#define x(_name, _bits) u64 _name##_from_inode:1; + BCH_INODE_OPTS() +#undef x }; -static inline unsigned background_compression(struct bch_io_opts opts) +static inline void bch2_io_opts_fixups(struct bch_io_opts *opts) { - return opts.background_compression ?: opts.compression; + if (!opts->background_target) + opts->background_target = opts->foreground_target; + if (!opts->background_compression) + opts->background_compression = opts->compression; + if (opts->nocow) { + opts->compression = opts->background_compression = 0; + opts->data_checksum = 0; + opts->erasure_code = 0; + } } struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts); diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h index 1d570387b77f..d0dd398baa2b 100644 --- a/fs/bcachefs/printbuf.h +++ b/fs/bcachefs/printbuf.h @@ -251,16 +251,23 @@ static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte) printbuf_nul_terminate_reserved(out); } +static inline void printbuf_reset_keep_tabstops(struct printbuf *buf) +{ + buf->pos = 0; + buf->allocation_failure = 0; + buf->last_newline = 0; + buf->last_field = 0; + buf->indent = 0; + buf->cur_tabstop = 0; +} + /** * printbuf_reset - re-use a printbuf without freeing and re-initializing it: */ static inline void printbuf_reset(struct printbuf *buf) { - buf->pos = 0; - buf->allocation_failure = 0; - buf->indent = 0; + printbuf_reset_keep_tabstops(buf); buf->nr_tabstops = 0; - buf->cur_tabstop = 0; } /** diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c index 74f45a8162ad..8b857fc33244 100644 --- a/fs/bcachefs/quota.c +++ b/fs/bcachefs/quota.c @@ -60,7 +60,7 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = { }; int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h index a62abcc5332a..1551800ff44c 100644 --- a/fs/bcachefs/quota.h +++ b/fs/bcachefs/quota.h @@ -5,10 +5,10 @@ #include "inode.h" #include "quota_types.h" -enum bch_validate_flags; extern const struct bch_sb_field_ops bch_sb_field_ops_quota; -int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_quota ((struct bkey_ops) { \ diff --git a/fs/bcachefs/rcu_pending.c b/fs/bcachefs/rcu_pending.c index 40a20192eee8..bef2aa1b8bcd 100644 --- a/fs/bcachefs/rcu_pending.c +++ b/fs/bcachefs/rcu_pending.c @@ -25,21 +25,37 @@ enum rcu_pending_special { #define RCU_PENDING_KVFREE_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_KVFREE) #define RCU_PENDING_CALL_RCU_FN ((rcu_pending_process_fn) (ulong) RCU_PENDING_CALL_RCU) -static inline unsigned long __get_state_synchronize_rcu(struct srcu_struct *ssp) +#ifdef __KERNEL__ +typedef unsigned long rcu_gp_poll_state_t; + +static inline bool rcu_gp_poll_cookie_eq(rcu_gp_poll_state_t l, rcu_gp_poll_state_t r) +{ + return l == r; +} +#else +typedef struct urcu_gp_poll_state rcu_gp_poll_state_t; + +static inline bool 
rcu_gp_poll_cookie_eq(rcu_gp_poll_state_t l, rcu_gp_poll_state_t r) +{ + return l.grace_period_id == r.grace_period_id; +} +#endif + +static inline rcu_gp_poll_state_t __get_state_synchronize_rcu(struct srcu_struct *ssp) { return ssp ? get_state_synchronize_srcu(ssp) : get_state_synchronize_rcu(); } -static inline unsigned long __start_poll_synchronize_rcu(struct srcu_struct *ssp) +static inline rcu_gp_poll_state_t __start_poll_synchronize_rcu(struct srcu_struct *ssp) { return ssp ? start_poll_synchronize_srcu(ssp) : start_poll_synchronize_rcu(); } -static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, unsigned long cookie) +static inline bool __poll_state_synchronize_rcu(struct srcu_struct *ssp, rcu_gp_poll_state_t cookie) { return ssp ? poll_state_synchronize_srcu(ssp, cookie) @@ -71,13 +87,13 @@ struct rcu_pending_seq { GENRADIX(struct rcu_head *) objs; size_t nr; struct rcu_head **cursor; - unsigned long seq; + rcu_gp_poll_state_t seq; }; struct rcu_pending_list { struct rcu_head *head; struct rcu_head *tail; - unsigned long seq; + rcu_gp_poll_state_t seq; }; struct rcu_pending_pcpu { @@ -316,10 +332,10 @@ static void rcu_pending_rcu_cb(struct rcu_head *rcu) } static __always_inline struct rcu_pending_seq * -get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq) +get_object_radix(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq) { darray_for_each_reverse(p->objs, objs) - if (objs->seq == seq) + if (rcu_gp_poll_cookie_eq(objs->seq, seq)) return objs; if (darray_push_gfp(&p->objs, ((struct rcu_pending_seq) { .seq = seq }), GFP_ATOMIC)) @@ -329,7 +345,7 @@ get_object_radix(struct rcu_pending_pcpu *p, unsigned long seq) } static noinline bool -rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, +rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, rcu_gp_poll_state_t seq, struct rcu_head *head, void *ptr, unsigned long *flags) { @@ -364,7 +380,7 @@ rcu_pending_enqueue_list(struct rcu_pending_pcpu *p, unsigned long seq, again: for (struct rcu_pending_list *i = p->lists; i < p->lists + NUM_ACTIVE_RCU_POLL_OLDSTATE; i++) { - if (i->seq == seq) { + if (rcu_gp_poll_cookie_eq(i->seq, seq)) { rcu_pending_list_add(i, head); return false; } @@ -408,7 +424,7 @@ __rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head, struct rcu_pending_pcpu *p; struct rcu_pending_seq *objs; struct genradix_node *new_node = NULL; - unsigned long seq, flags; + unsigned long flags; bool start_gp = false; BUG_ON((ptr != NULL) != (pending->process == RCU_PENDING_KVFREE_FN)); @@ -416,7 +432,7 @@ __rcu_pending_enqueue(struct rcu_pending *pending, struct rcu_head *head, local_irq_save(flags); p = this_cpu_ptr(pending->p); spin_lock(&p->lock); - seq = __get_state_synchronize_rcu(pending->srcu); + rcu_gp_poll_state_t seq = __get_state_synchronize_rcu(pending->srcu); restart: if (may_sleep && unlikely(process_finished_items(pending, p, flags))) diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c index cd6647374353..d0a1f5cd5c2b 100644 --- a/fs/bcachefs/rebalance.c +++ b/fs/bcachefs/rebalance.c @@ -24,6 +24,190 @@ #include <linux/kthread.h> #include <linux/sched/cputime.h> +/* bch_extent_rebalance: */ + +static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + + bkey_extent_entry_for_each(ptrs, entry) + if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance) + return &entry->rebalance; + + return NULL; +} + +static inline 
unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s_c k, + struct bkey_ptrs_c ptrs) +{ + if (!opts->background_compression) + return 0; + + unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + unsigned ptr_bit = 1; + unsigned rewrite_ptrs = 0; + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || + p.ptr.unwritten) + return 0; + + if (!p.ptr.cached && p.crc.compression_type != compression_type) + rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; + } + + return rewrite_ptrs; +} + +static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_ptrs_c ptrs) +{ + if (!opts->background_target || + !bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target)) + return 0; + + unsigned ptr_bit = 1; + unsigned rewrite_ptrs = 0; + + bkey_for_each_ptr(ptrs, ptr) { + if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target)) + rewrite_ptrs |= ptr_bit; + ptr_bit <<= 1; + } + + return rewrite_ptrs; +} + +static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, + struct bch_io_opts *opts, + struct bkey_s_c k) +{ + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + + return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) | + bch2_bkey_ptrs_need_move(c, opts, ptrs); +} + +u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k) +{ + const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k); + if (!opts) + return 0; + + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + const union bch_extent_entry *entry; + struct extent_ptr_decoded p; + u64 sectors = 0; + + if (opts->background_compression) { + unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression); + + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { + if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible || + p.ptr.unwritten) { + sectors = 0; + goto incompressible; + } + + if (!p.ptr.cached && p.crc.compression_type != compression_type) + sectors += p.crc.compressed_size; + } + } +incompressible: + if (opts->background_target) + bkey_for_each_ptr_decode(k.k, ptrs, p, entry) + if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target)) + sectors += p.crc.compressed_size; + + return sectors; +} + +static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts, + struct bkey_s_c k) +{ + if (!bkey_extent_is_direct_data(k.k)) + return 0; + + const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k); + + if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) { + struct bch_extent_rebalance new = io_opts_to_rebalance_opts(c, opts); + return old == NULL || memcmp(old, &new, sizeof(new)); + } else { + return old != NULL; + } +} + +int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts, + struct bkey_i *_k) +{ + if (!bkey_extent_is_direct_data(&_k->k)) + return 0; + + struct bkey_s k = bkey_i_to_s(_k); + struct bch_extent_rebalance *old = + (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c); + + if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) { + if (!old) { + old = bkey_val_end(k); + k.k->u64s += sizeof(*old) / sizeof(u64); + } + + *old = io_opts_to_rebalance_opts(c, opts); + } else { + if (old) + extent_entry_drop(k, (union bch_extent_entry *) old); + 
} + + return 0; +} + +int bch2_get_update_rebalance_opts(struct btree_trans *trans, + struct bch_io_opts *io_opts, + struct btree_iter *iter, + struct bkey_s_c k) +{ + BUG_ON(iter->flags & BTREE_ITER_is_extents); + BUG_ON(iter->flags & BTREE_ITER_filter_snapshots); + + const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v + ? bch2_bkey_rebalance_opts(k) : NULL; + if (r) { +#define x(_name) \ + if (r->_name##_from_inode) { \ + io_opts->_name = r->_name; \ + io_opts->_name##_from_inode = true; \ + } + BCH_REBALANCE_OPTS() +#undef x + } + + if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k)) + return 0; + + struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8); + int ret = PTR_ERR_OR_ZERO(n); + if (ret) + return ret; + + bkey_reassemble(n, k); + + /* On successfull transaction commit, @k was invalidated: */ + + return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?: + bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?: + bch2_trans_commit(trans, NULL, NULL, 0) ?: + -BCH_ERR_transaction_restart_nested; +} + #define REBALANCE_WORK_SCAN_OFFSET (U64_MAX - 1) static const char * const bch2_rebalance_state_strs[] = { @@ -33,7 +217,7 @@ static const char * const bch2_rebalance_state_strs[] = { #undef x }; -static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum) +int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum) { struct btree_iter iter; struct bkey_s_c k; @@ -71,9 +255,8 @@ err: int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum) { int ret = bch2_trans_commit_do(c, NULL, NULL, - BCH_TRANS_COMMIT_no_enospc| - BCH_TRANS_COMMIT_lazy_rw, - __bch2_set_rebalance_needs_scan(trans, inum)); + BCH_TRANS_COMMIT_no_enospc, + bch2_set_rebalance_needs_scan_trans(trans, inum)); rebalance_wakeup(c); return ret; } @@ -121,6 +304,9 @@ static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k) { + if (!bch2_bkey_rebalance_opts(k)) + return 0; + struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0); int ret = PTR_ERR_OR_ZERO(n); if (ret) @@ -134,31 +320,27 @@ static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans, static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, struct bpos work_pos, struct btree_iter *extent_iter, + struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { struct bch_fs *c = trans->c; - struct bkey_s_c k; bch2_trans_iter_exit(trans, extent_iter); bch2_trans_iter_init(trans, extent_iter, work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink, work_pos, BTREE_ITER_all_snapshots); - k = bch2_btree_iter_peek_slot(extent_iter); + struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter); if (bkey_err(k)) return k; - const struct bch_extent_rebalance *r = k.k ? 
bch2_bkey_rebalance_opts(k) : NULL; - if (!r) { - /* raced due to btree write buffer, nothing to do */ - return bkey_s_c_null; - } + int ret = bch2_move_get_io_opts_one(trans, io_opts, extent_iter, k); + if (ret) + return bkey_s_c_err(ret); memset(data_opts, 0, sizeof(*data_opts)); - - data_opts->rewrite_ptrs = - bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression); - data_opts->target = r->target; + data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k); + data_opts->target = io_opts->background_target; data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS; if (!data_opts->rewrite_ptrs) { @@ -178,12 +360,28 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans, if (trace_rebalance_extent_enabled()) { struct printbuf buf = PRINTBUF; - prt_str(&buf, "target="); - bch2_target_to_text(&buf, c, r->target); - prt_str(&buf, " compression="); - bch2_compression_opt_to_text(&buf, r->compression); - prt_str(&buf, " "); bch2_bkey_val_to_text(&buf, c, k); + prt_newline(&buf); + + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); + + unsigned p = bch2_bkey_ptrs_need_compress(c, io_opts, k, ptrs); + if (p) { + prt_str(&buf, "compression="); + bch2_compression_opt_to_text(&buf, io_opts->background_compression); + prt_str(&buf, " "); + bch2_prt_u64_base2(&buf, p); + prt_newline(&buf); + } + + p = bch2_bkey_ptrs_need_move(c, io_opts, ptrs); + if (p) { + prt_str(&buf, "move="); + bch2_target_to_text(&buf, c, io_opts->background_target); + prt_str(&buf, " "); + bch2_prt_u64_base2(&buf, p); + prt_newline(&buf); + } trace_rebalance_extent(c, buf.buf); printbuf_exit(&buf); @@ -212,14 +410,10 @@ static int do_rebalance_extent(struct moving_context *ctxt, bch2_bkey_buf_init(&sk); ret = bkey_err(k = next_rebalance_extent(trans, work_pos, - extent_iter, &data_opts)); + extent_iter, &io_opts, &data_opts)); if (ret || !k.k) goto out; - ret = bch2_move_get_io_opts_one(trans, &io_opts, k); - if (ret) - goto out; - atomic64_add(k.k->size, &ctxt->stats->sectors_seen); /* @@ -253,20 +447,8 @@ static bool rebalance_pred(struct bch_fs *c, void *arg, struct bch_io_opts *io_opts, struct data_update_opts *data_opts) { - unsigned target, compression; - - if (k.k->p.inode) { - target = io_opts->background_target; - compression = background_compression(*io_opts); - } else { - const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k); - - target = r ? r->target : io_opts->background_target; - compression = r ? 
r->compression : background_compression(*io_opts); - } - - data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, k, target, compression); - data_opts->target = target; + data_opts->rewrite_ptrs = bch2_bkey_ptrs_need_rebalance(c, io_opts, k); + data_opts->target = io_opts->background_target; data_opts->write_flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS; return data_opts->rewrite_ptrs != 0; } @@ -338,9 +520,9 @@ static int do_rebalance(struct moving_context *ctxt) BTREE_ITER_all_snapshots); while (!bch2_move_ratelimit(ctxt)) { - if (!r->enabled) { + if (!c->opts.rebalance_enabled) { bch2_moving_ctxt_flush_all(ctxt); - kthread_wait_freezable(r->enabled || + kthread_wait_freezable(c->opts.rebalance_enabled || kthread_should_stop()); } diff --git a/fs/bcachefs/rebalance.h b/fs/bcachefs/rebalance.h index 28a52638f16c..62a3859d3823 100644 --- a/fs/bcachefs/rebalance.h +++ b/fs/bcachefs/rebalance.h @@ -2,8 +2,38 @@ #ifndef _BCACHEFS_REBALANCE_H #define _BCACHEFS_REBALANCE_H +#include "compress.h" +#include "disk_groups.h" +#include "opts.h" #include "rebalance_types.h" +static inline struct bch_extent_rebalance io_opts_to_rebalance_opts(struct bch_fs *c, + struct bch_io_opts *opts) +{ + struct bch_extent_rebalance r = { + .type = BIT(BCH_EXTENT_ENTRY_rebalance), +#define x(_name) \ + ._name = opts->_name, \ + ._name##_from_inode = opts->_name##_from_inode, + BCH_REBALANCE_OPTS() +#undef x + }; + + if (r.background_target && + !bch2_target_accepts_data(c, BCH_DATA_user, r.background_target)) + r.background_target = 0; + + return r; +}; + +u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *, struct bkey_s_c); +int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bch_io_opts *, struct bkey_i *); +int bch2_get_update_rebalance_opts(struct btree_trans *, + struct bch_io_opts *, + struct btree_iter *, + struct bkey_s_c); + +int bch2_set_rebalance_needs_scan_trans(struct btree_trans *, u64); int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum); int bch2_set_fs_needs_rebalance(struct bch_fs *); diff --git a/fs/bcachefs/rebalance_format.h b/fs/bcachefs/rebalance_format.h new file mode 100644 index 000000000000..ff9a1342a22b --- /dev/null +++ b/fs/bcachefs/rebalance_format.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_REBALANCE_FORMAT_H +#define _BCACHEFS_REBALANCE_FORMAT_H + +struct bch_extent_rebalance { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u64 type:6, + unused:3, + + promote_target_from_inode:1, + erasure_code_from_inode:1, + data_checksum_from_inode:1, + background_compression_from_inode:1, + data_replicas_from_inode:1, + background_target_from_inode:1, + + promote_target:16, + erasure_code:1, + data_checksum:4, + data_replicas:4, + background_compression:8, /* enum bch_compression_opt */ + background_target:16; +#elif defined (__BIG_ENDIAN_BITFIELD) + __u64 background_target:16, + background_compression:8, + data_replicas:4, + data_checksum:4, + erasure_code:1, + promote_target:16, + + background_target_from_inode:1, + data_replicas_from_inode:1, + background_compression_from_inode:1, + data_checksum_from_inode:1, + erasure_code_from_inode:1, + promote_target_from_inode:1, + + unused:3, + type:6; +#endif +}; + +/* subset of BCH_INODE_OPTS */ +#define BCH_REBALANCE_OPTS() \ + x(data_checksum) \ + x(background_compression) \ + x(data_replicas) \ + x(promote_target) \ + x(background_target) \ + x(erasure_code) + +#endif /* _BCACHEFS_REBALANCE_FORMAT_H */ + diff --git a/fs/bcachefs/rebalance_types.h b/fs/bcachefs/rebalance_types.h index 
0fffb536c1d0..fe5098c17dfc 100644 --- a/fs/bcachefs/rebalance_types.h +++ b/fs/bcachefs/rebalance_types.h @@ -30,8 +30,6 @@ struct bch_fs_rebalance { struct bbpos scan_start; struct bbpos scan_end; struct bch_move_stats scan_stats; - - unsigned enabled:1; }; #endif /* _BCACHEFS_REBALANCE_TYPES_H */ diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c index 3c7f941dde39..71c786cdb192 100644 --- a/fs/bcachefs/recovery.c +++ b/fs/bcachefs/recovery.c @@ -32,23 +32,84 @@ #include <linux/sort.h> #include <linux/stat.h> -#define QSTR(n) { { { .len = strlen(n) } }, .name = n } -void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) +int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree) { - if (btree >= BTREE_ID_NR_MAX) - return; - u64 b = BIT_ULL(btree); + int ret = 0; + + mutex_lock(&c->sb_lock); + struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); if (!(c->sb.btrees_lost_data & b)) { - bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree)); + struct printbuf buf = PRINTBUF; + bch2_btree_id_to_text(&buf, btree); + bch_err(c, "flagging btree %s lost data", buf.buf); + printbuf_exit(&buf); + ext->btrees_lost_data |= cpu_to_le64(b); + } - mutex_lock(&c->sb_lock); - bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b); - bch2_write_super(c); - mutex_unlock(&c->sb_lock); + /* Once we have runtime self healing for topology errors we won't need this: */ + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_topology) ?: ret; + + /* Btree node accounting will be off: */ + __set_bit_le64(BCH_FSCK_ERR_accounting_mismatch, ext->errors_silent); + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; + +#ifdef CONFIG_BCACHEFS_DEBUG + /* + * These are much more minor, and don't need to be corrected right away, + * but in debug mode we want the next fsck run to be clean: + */ + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_lrus) ?: ret; + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_backpointers_to_extents) ?: ret; +#endif + + switch (btree) { + case BTREE_ID_alloc: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + + __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent); + goto out; + case BTREE_ID_backpointers: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_btree_backpointers) ?: ret; + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_extents_to_backpointers) ?: ret; + goto out; + case BTREE_ID_need_discard: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + goto out; + case BTREE_ID_freespace: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + goto out; + case BTREE_ID_bucket_gens: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + goto out; + case BTREE_ID_lru: + ret = 
bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_alloc_info) ?: ret; + goto out; + case BTREE_ID_accounting: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_check_allocations) ?: ret; + goto out; + default: + ret = bch2_run_explicit_recovery_pass_persistent_locked(c, BCH_RECOVERY_PASS_scan_for_btree_nodes) ?: ret; + goto out; } +out: + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + + return ret; +} + +static void kill_btree(struct bch_fs *c, enum btree_id btree) +{ + bch2_btree_id_root(c, btree)->alive = false; + bch2_shoot_down_journal_keys(c, btree, 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); } /* for -o reconstruct_alloc: */ @@ -79,6 +140,8 @@ static void bch2_reconstruct_alloc(struct bch_fs *c) __set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_to_missing_lru_entry, ext->errors_silent); + __set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent); __set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent); @@ -99,16 +162,9 @@ static void bch2_reconstruct_alloc(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); - bch2_shoot_down_journal_keys(c, BTREE_ID_alloc, - 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); - bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers, - 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); - bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard, - 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); - bch2_shoot_down_journal_keys(c, BTREE_ID_freespace, - 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); - bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens, - 0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX); + for (unsigned i = 0; i < btree_id_nr_alive(c); i++) + if (btree_id_is_alloc(i)) + kill_btree(c, i); } /* @@ -354,10 +410,13 @@ int bch2_journal_replay(struct bch_fs *c) ? 
BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim : 0), bch2_journal_replay_key(trans, k)); - bch_err_msg(c, ret, "while replaying key at btree %s level %u:", - bch2_btree_id_str(k->btree_id), k->level); - if (ret) + if (ret) { + struct printbuf buf = PRINTBUF; + bch2_btree_id_level_to_text(&buf, k->btree_id, k->level); + bch_err_msg(c, ret, "while replaying key at %s:", buf.buf); + printbuf_exit(&buf); goto err; + } BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten); } @@ -403,7 +462,9 @@ static int journal_replay_entry_early(struct bch_fs *c, switch (entry->type) { case BCH_JSET_ENTRY_btree_root: { - struct btree_root *r; + + if (unlikely(!entry->u64s)) + return 0; if (fsck_err_on(entry->btree_id >= BTREE_ID_NR_MAX, c, invalid_btree_id, @@ -417,15 +478,11 @@ static int journal_replay_entry_early(struct bch_fs *c, return ret; } - r = bch2_btree_id_root(c, entry->btree_id); + struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); - if (entry->u64s) { - r->level = entry->level; - bkey_copy(&r->key, (struct bkey_i *) entry->start); - r->error = 0; - } else { - r->error = -BCH_ERR_btree_node_read_error; - } + r->level = entry->level; + bkey_copy(&r->key, (struct bkey_i *) entry->start); + r->error = 0; r->alive = true; break; } @@ -505,6 +562,7 @@ static int journal_replay_early(struct bch_fs *c, static int read_btree_roots(struct bch_fs *c) { + struct printbuf buf = PRINTBUF; int ret = 0; for (unsigned i = 0; i < btree_id_nr_alive(c); i++) { @@ -513,33 +571,22 @@ static int read_btree_roots(struct bch_fs *c) if (!r->alive) continue; - if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc) - continue; + printbuf_reset(&buf); + bch2_btree_id_level_to_text(&buf, i, r->level); if (mustfix_fsck_err_on((ret = r->error), c, btree_root_bkey_invalid, "invalid btree root %s", - bch2_btree_id_str(i)) || + buf.buf) || mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)), c, btree_root_read_error, - "error reading btree root %s l=%u: %s", - bch2_btree_id_str(i), r->level, bch2_err_str(ret))) { - if (btree_id_is_alloc(i)) { - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs); - c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); + "error reading btree root %s: %s", + buf.buf, bch2_err_str(ret))) { + if (btree_id_is_alloc(i)) r->error = 0; - } else if (!(c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) { - bch_info(c, "will run btree node scan"); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes); - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology); - } - ret = 0; - bch2_btree_lost_data(c, i); + ret = bch2_btree_lost_data(c, i); + BUG_ON(ret); } } @@ -553,6 +600,7 @@ static int read_btree_roots(struct bch_fs *c) } } fsck_err: + printbuf_exit(&buf); return ret; } @@ -563,6 +611,7 @@ static bool check_version_upgrade(struct bch_fs *c) bch2_latest_compatible_version(c->sb.version)); unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version; unsigned new_version = 0; + bool ret = false; if (old_version < bcachefs_metadata_required_upgrade_below) { if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible || @@ -618,14 +667,32 @@ static bool 
check_version_upgrade(struct bch_fs *c) } bch_info(c, "%s", buf.buf); + printbuf_exit(&buf); - bch2_sb_upgrade(c, new_version); + ret = true; + } + if (new_version > c->sb.version_incompat && + c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible) { + struct printbuf buf = PRINTBUF; + + prt_str(&buf, "Now allowing incompatible features up to "); + bch2_version_to_text(&buf, new_version); + prt_str(&buf, ", previously allowed up to "); + bch2_version_to_text(&buf, c->sb.version_incompat_allowed); + prt_newline(&buf); + + bch_info(c, "%s", buf.buf); printbuf_exit(&buf); - return true; + + ret = true; } - return false; + if (ret) + bch2_sb_upgrade(c, new_version, + c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible); + + return ret; } int bch2_fs_recovery(struct bch_fs *c) @@ -660,8 +727,13 @@ int bch2_fs_recovery(struct bch_fs *c) goto err; } - if (c->opts.norecovery) - c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1; + if (c->opts.norecovery) { + c->opts.recovery_pass_last = c->opts.recovery_pass_last + ? min(c->opts.recovery_pass_last, BCH_RECOVERY_PASS_snapshots_read) + : BCH_RECOVERY_PASS_snapshots_read; + c->opts.nochanges = true; + c->opts.read_only = true; + } mutex_lock(&c->sb_lock); struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); @@ -708,17 +780,20 @@ int bch2_fs_recovery(struct bch_fs *c) c->opts.recovery_passes |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0])); + if (c->sb.version_upgrade_complete < bcachefs_metadata_version_autofix_errors) { + SET_BCH_SB_ERROR_ACTION(c->disk_sb.sb, BCH_ON_ERROR_fix_safe); + write_sb = true; + } + if (write_sb) bch2_write_super(c); mutex_unlock(&c->sb_lock); - if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) - c->opts.recovery_passes |= BIT_ULL(BCH_RECOVERY_PASS_check_topology); - if (c->opts.fsck) set_bit(BCH_FS_fsck_running, &c->flags); if (c->sb.clean) set_bit(BCH_FS_clean_recovery, &c->flags); + set_bit(BCH_FS_recovery_running, &c->flags); ret = bch2_blacklist_table_initialize(c); if (ret) { @@ -807,15 +882,15 @@ use_clean: c->journal_replay_seq_start = last_seq; c->journal_replay_seq_end = blacklist_seq - 1; - if (c->opts.reconstruct_alloc) - bch2_reconstruct_alloc(c); - zero_out_btree_mem_ptr(&c->journal_keys); ret = journal_replay_early(c, clean); if (ret) goto err; + if (c->opts.reconstruct_alloc) + bch2_reconstruct_alloc(c); + /* * After an unclean shutdown, skip then next few journal sequence * numbers as they may have been referenced by btree writes that @@ -870,16 +945,17 @@ use_clean: */ set_bit(BCH_FS_may_go_rw, &c->flags); clear_bit(BCH_FS_fsck_running, &c->flags); + clear_bit(BCH_FS_recovery_running, &c->flags); /* in case we don't run journal replay, i.e. 
norecovery mode */ set_bit(BCH_FS_accounting_replay_done, &c->flags); + bch2_async_btree_node_rewrites_flush(c); + /* fsync if we fixed errors */ - if (test_bit(BCH_FS_errors_fixed, &c->flags) && - bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) { + if (test_bit(BCH_FS_errors_fixed, &c->flags)) { bch2_journal_flush_all_pins(&c->journal); bch2_journal_meta(&c->journal); - bch2_write_ref_put(c, BCH_WRITE_REF_fsync); } /* If we fixed errors, verify that fs is actually clean now: */ @@ -1021,7 +1097,7 @@ int bch2_fs_initialize(struct bch_fs *c) bch2_check_version_downgrade(c); if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) { - bch2_sb_upgrade(c, bcachefs_metadata_version_current); + bch2_sb_upgrade(c, bcachefs_metadata_version_current, false); SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); bch2_write_super(c); } @@ -1035,7 +1111,6 @@ int bch2_fs_initialize(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); - c->curr_recovery_pass = BCH_RECOVERY_PASS_NR; set_bit(BCH_FS_btree_running, &c->flags); set_bit(BCH_FS_may_go_rw, &c->flags); @@ -1076,9 +1151,6 @@ int bch2_fs_initialize(struct bch_fs *c) if (ret) goto err; - for_each_online_member(c, ca) - ca->new_fs_bucket_idx = 0; - ret = bch2_fs_freespace_init(c); if (ret) goto err; @@ -1137,6 +1209,7 @@ int bch2_fs_initialize(struct bch_fs *c) bch2_write_super(c); mutex_unlock(&c->sb_lock); + c->curr_recovery_pass = BCH_RECOVERY_PASS_NR; return 0; err: bch_err_fn(c, ret); diff --git a/fs/bcachefs/recovery.h b/fs/bcachefs/recovery.h index 4bf818de1f2f..b0d55754b21b 100644 --- a/fs/bcachefs/recovery.h +++ b/fs/bcachefs/recovery.h @@ -2,7 +2,7 @@ #ifndef _BCACHEFS_RECOVERY_H #define _BCACHEFS_RECOVERY_H -void bch2_btree_lost_data(struct bch_fs *, enum btree_id); +int bch2_btree_lost_data(struct bch_fs *, enum btree_id); int bch2_journal_replay(struct bch_fs *); diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c index dff589ddc984..0b3c951c32da 100644 --- a/fs/bcachefs/recovery_passes.c +++ b/fs/bcachefs/recovery_passes.c @@ -46,7 +46,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c) set_bit(BCH_FS_may_go_rw, &c->flags); - if (keys->nr || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes) + if (keys->nr || !c->opts.read_only || c->opts.fsck || !c->sb.clean || c->opts.recovery_passes) return bch2_fs_read_write_early(c); return 0; } @@ -100,20 +100,34 @@ u64 bch2_recovery_passes_from_stable(u64 v) /* * For when we need to rewind recovery passes and run a pass we skipped: */ -int bch2_run_explicit_recovery_pass(struct bch_fs *c, - enum bch_recovery_pass pass) +static int __bch2_run_explicit_recovery_pass(struct bch_fs *c, + enum bch_recovery_pass pass) { - if (c->opts.recovery_passes & BIT_ULL(pass)) + if (c->curr_recovery_pass == ARRAY_SIZE(recovery_pass_fns)) + return -BCH_ERR_not_in_recovery; + + if (c->recovery_passes_complete & BIT_ULL(pass)) return 0; - bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)", - bch2_recovery_passes[pass], pass, - bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass); + bool print = !(c->opts.recovery_passes & BIT_ULL(pass)); + + if (pass < BCH_RECOVERY_PASS_set_may_go_rw && + c->curr_recovery_pass >= BCH_RECOVERY_PASS_set_may_go_rw) { + if (print) + bch_info(c, "need recovery pass %s (%u), but already rw", + bch2_recovery_passes[pass], pass); + return -BCH_ERR_cannot_rewind_recovery; + } + + if (print) + bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)", + 
bch2_recovery_passes[pass], pass, + bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass); c->opts.recovery_passes |= BIT_ULL(pass); - if (c->curr_recovery_pass >= pass) { - c->curr_recovery_pass = pass; + if (c->curr_recovery_pass > pass) { + c->next_recovery_pass = pass; c->recovery_passes_complete &= (1ULL << pass) >> 1; return -BCH_ERR_restart_recovery; } else { @@ -121,6 +135,27 @@ int bch2_run_explicit_recovery_pass(struct bch_fs *c, } } +int bch2_run_explicit_recovery_pass(struct bch_fs *c, + enum bch_recovery_pass pass) +{ + unsigned long flags; + spin_lock_irqsave(&c->recovery_pass_lock, flags); + int ret = __bch2_run_explicit_recovery_pass(c, pass); + spin_unlock_irqrestore(&c->recovery_pass_lock, flags); + return ret; +} + +int bch2_run_explicit_recovery_pass_persistent_locked(struct bch_fs *c, + enum bch_recovery_pass pass) +{ + lockdep_assert_held(&c->sb_lock); + + struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext); + __set_bit_le64(bch2_recovery_pass_to_stable(pass), ext->recovery_passes_required); + + return bch2_run_explicit_recovery_pass(c, pass); +} + int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *c, enum bch_recovery_pass pass) { @@ -233,31 +268,48 @@ int bch2_run_recovery_passes(struct bch_fs *c) */ c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw; - while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) { - if (c->opts.recovery_pass_last && - c->curr_recovery_pass > c->opts.recovery_pass_last) - break; - - if (should_run_recovery_pass(c, c->curr_recovery_pass)) { - unsigned pass = c->curr_recovery_pass; + while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns) && !ret) { + c->next_recovery_pass = c->curr_recovery_pass + 1; - ret = bch2_run_recovery_pass(c, c->curr_recovery_pass) ?: - bch2_journal_flush(&c->journal); - if (bch2_err_matches(ret, BCH_ERR_restart_recovery) || - (ret && c->curr_recovery_pass < pass)) - continue; - if (ret) - break; + spin_lock_irq(&c->recovery_pass_lock); + unsigned pass = c->curr_recovery_pass; - c->recovery_passes_complete |= BIT_ULL(c->curr_recovery_pass); + if (c->opts.recovery_pass_last && + c->curr_recovery_pass > c->opts.recovery_pass_last) { + spin_unlock_irq(&c->recovery_pass_lock); + break; } - c->recovery_pass_done = max(c->recovery_pass_done, c->curr_recovery_pass); - - if (!test_bit(BCH_FS_error, &c->flags)) - bch2_clear_recovery_pass_required(c, c->curr_recovery_pass); - - c->curr_recovery_pass++; + if (!should_run_recovery_pass(c, pass)) { + c->curr_recovery_pass++; + c->recovery_pass_done = max(c->recovery_pass_done, pass); + spin_unlock_irq(&c->recovery_pass_lock); + continue; + } + spin_unlock_irq(&c->recovery_pass_lock); + + ret = bch2_run_recovery_pass(c, pass) ?: + bch2_journal_flush(&c->journal); + + if (!ret && !test_bit(BCH_FS_error, &c->flags)) + bch2_clear_recovery_pass_required(c, pass); + + spin_lock_irq(&c->recovery_pass_lock); + if (c->next_recovery_pass < c->curr_recovery_pass) { + /* + * bch2_run_explicit_recovery_pass() was called: we + * can't always catch -BCH_ERR_restart_recovery because + * it may have been called from another thread (btree + * node read completion) + */ + ret = 0; + c->recovery_passes_complete &= ~(~0ULL << c->curr_recovery_pass); + } else { + c->recovery_passes_complete |= BIT_ULL(pass); + c->recovery_pass_done = max(c->recovery_pass_done, pass); + } + c->curr_recovery_pass = c->next_recovery_pass; + spin_unlock_irq(&c->recovery_pass_lock); } return ret; diff --git a/fs/bcachefs/recovery_passes.h 
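The reworked pass loop above lets a later pass (or another thread, via bch2_run_explicit_recovery_pass()) request a rewind through next_recovery_pass under recovery_pass_lock. A minimal single-threaded sketch of that control flow, with an invented three-pass list and no locking, just to show how the rewind is picked up:

/* Sketch of the rewind protocol: a requester asks for an earlier pass,
 * the runner follows next_pass instead of marching forward.  Pass names
 * and helper names are illustrative, not the kernel API.
 */
#include <stdio.h>

enum { PASS_A, PASS_B, PASS_C, PASS_NR };

static unsigned curr_pass, next_pass;
static unsigned long long passes_complete;

/* analogue of bch2_run_explicit_recovery_pass(): rewind if already past it */
static void request_pass(unsigned pass)
{
	if (passes_complete & (1ULL << pass))
		return;
	if (curr_pass > pass) {
		next_pass = pass;
		passes_complete &= (1ULL << pass) - 1;	/* re-run everything after it */
	}
}

int main(void)
{
	int need_a = 0;		/* set when pass B discovers it needs pass A */

	while (curr_pass < PASS_NR) {
		next_pass = curr_pass + 1;

		int should_run = (curr_pass != PASS_A) || need_a;
		if (should_run) {
			printf("running pass %u\n", curr_pass);
			passes_complete |= 1ULL << curr_pass;
		}

		if (curr_pass == PASS_B && !need_a) {
			need_a = 1;
			request_pass(PASS_A);	/* rewinds: next_pass becomes PASS_A */
		}

		curr_pass = next_pass;
	}
	return 0;
}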
b/fs/bcachefs/recovery_passes.h index 99b464e127b8..7d7339c8fa29 100644 --- a/fs/bcachefs/recovery_passes.h +++ b/fs/bcachefs/recovery_passes.h @@ -9,6 +9,7 @@ u64 bch2_recovery_passes_from_stable(u64 v); u64 bch2_fsck_recovery_passes(void); int bch2_run_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass); +int bch2_run_explicit_recovery_pass_persistent_locked(struct bch_fs *, enum bch_recovery_pass); int bch2_run_explicit_recovery_pass_persistent(struct bch_fs *, enum bch_recovery_pass); int bch2_run_online_recovery_passes(struct bch_fs *); diff --git a/fs/bcachefs/recovery_passes_types.h b/fs/bcachefs/recovery_passes_types.h index 94dc20ca2065..418557960ed6 100644 --- a/fs/bcachefs/recovery_passes_types.h +++ b/fs/bcachefs/recovery_passes_types.h @@ -8,53 +8,59 @@ #define PASS_ALWAYS BIT(3) #define PASS_ONLINE BIT(4) +#ifdef CONFIG_BCACHEFS_DEBUG +#define PASS_FSCK_DEBUG BIT(1) +#else +#define PASS_FSCK_DEBUG 0 +#endif + /* * Passes may be reordered, but the second field is a persistent identifier and * must never change: */ -#define BCH_RECOVERY_PASSES() \ - x(recovery_pass_empty, 41, PASS_SILENT) \ - x(scan_for_btree_nodes, 37, 0) \ - x(check_topology, 4, 0) \ - x(accounting_read, 39, PASS_ALWAYS) \ - x(alloc_read, 0, PASS_ALWAYS) \ - x(stripes_read, 1, PASS_ALWAYS) \ - x(initialize_subvolumes, 2, 0) \ - x(snapshots_read, 3, PASS_ALWAYS) \ - x(check_allocations, 5, PASS_FSCK) \ - x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT) \ - x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT) \ - x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \ - x(journal_replay, 9, PASS_ALWAYS) \ - x(check_alloc_info, 10, PASS_ONLINE|PASS_FSCK) \ - x(check_lrus, 11, PASS_ONLINE|PASS_FSCK) \ - x(check_btree_backpointers, 12, PASS_ONLINE|PASS_FSCK) \ - x(check_backpointers_to_extents, 13, PASS_ONLINE|PASS_FSCK) \ - x(check_extents_to_backpointers, 14, PASS_ONLINE|PASS_FSCK) \ - x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \ - x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \ - x(bucket_gens_init, 17, 0) \ - x(reconstruct_snapshots, 38, 0) \ - x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \ - x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \ - x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \ - x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \ - x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \ - x(fs_upgrade_for_subvolumes, 22, 0) \ - x(check_inodes, 24, PASS_FSCK) \ - x(check_extents, 25, PASS_FSCK) \ - x(check_indirect_extents, 26, PASS_FSCK) \ - x(check_dirents, 27, PASS_FSCK) \ - x(check_xattrs, 28, PASS_FSCK) \ - x(check_root, 29, PASS_ONLINE|PASS_FSCK) \ - x(check_unreachable_inodes, 40, PASS_ONLINE|PASS_FSCK) \ - x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \ - x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \ - x(check_nlinks, 31, PASS_FSCK) \ - x(resume_logged_ops, 23, PASS_ALWAYS) \ - x(delete_dead_inodes, 32, PASS_ALWAYS) \ - x(fix_reflink_p, 33, 0) \ - x(set_fs_needs_rebalance, 34, 0) \ +#define BCH_RECOVERY_PASSES() \ + x(recovery_pass_empty, 41, PASS_SILENT) \ + x(scan_for_btree_nodes, 37, 0) \ + x(check_topology, 4, 0) \ + x(accounting_read, 39, PASS_ALWAYS) \ + x(alloc_read, 0, PASS_ALWAYS) \ + x(stripes_read, 1, PASS_ALWAYS) \ + x(initialize_subvolumes, 2, 0) \ + x(snapshots_read, 3, PASS_ALWAYS) \ + x(check_allocations, 5, PASS_FSCK) \ + x(trans_mark_dev_sbs, 6, PASS_ALWAYS|PASS_SILENT) \ + x(fs_journal_alloc, 7, PASS_ALWAYS|PASS_SILENT) \ + x(set_may_go_rw, 8, PASS_ALWAYS|PASS_SILENT) \ + x(journal_replay, 9, PASS_ALWAYS) \ + x(check_alloc_info, 10, 
PASS_ONLINE|PASS_FSCK) \ + x(check_lrus, 11, PASS_ONLINE|PASS_FSCK) \ + x(check_btree_backpointers, 12, PASS_ONLINE|PASS_FSCK) \ + x(check_backpointers_to_extents, 13, PASS_ONLINE|PASS_FSCK_DEBUG) \ + x(check_extents_to_backpointers, 14, PASS_ONLINE|PASS_FSCK) \ + x(check_alloc_to_lru_refs, 15, PASS_ONLINE|PASS_FSCK) \ + x(fs_freespace_init, 16, PASS_ALWAYS|PASS_SILENT) \ + x(bucket_gens_init, 17, 0) \ + x(reconstruct_snapshots, 38, 0) \ + x(check_snapshot_trees, 18, PASS_ONLINE|PASS_FSCK) \ + x(check_snapshots, 19, PASS_ONLINE|PASS_FSCK) \ + x(check_subvols, 20, PASS_ONLINE|PASS_FSCK) \ + x(check_subvol_children, 35, PASS_ONLINE|PASS_FSCK) \ + x(delete_dead_snapshots, 21, PASS_ONLINE|PASS_FSCK) \ + x(fs_upgrade_for_subvolumes, 22, 0) \ + x(check_inodes, 24, PASS_FSCK) \ + x(check_extents, 25, PASS_FSCK) \ + x(check_indirect_extents, 26, PASS_ONLINE|PASS_FSCK) \ + x(check_dirents, 27, PASS_FSCK) \ + x(check_xattrs, 28, PASS_FSCK) \ + x(check_root, 29, PASS_ONLINE|PASS_FSCK) \ + x(check_unreachable_inodes, 40, PASS_FSCK) \ + x(check_subvolume_structure, 36, PASS_ONLINE|PASS_FSCK) \ + x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \ + x(check_nlinks, 31, PASS_FSCK) \ + x(resume_logged_ops, 23, PASS_ALWAYS) \ + x(delete_dead_inodes, 32, PASS_ALWAYS) \ + x(fix_reflink_p, 33, 0) \ + x(set_fs_needs_rebalance, 34, 0) /* We normally enumerate recovery passes in the order we run them: */ enum bch_recovery_pass { diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c index f457925fa362..376fd0a6e868 100644 --- a/fs/bcachefs/reflink.c +++ b/fs/bcachefs/reflink.c @@ -15,6 +15,17 @@ #include <linux/sched/signal.h> +static inline bool bkey_extent_is_reflink_data(const struct bkey *k) +{ + switch (k->type) { + case KEY_TYPE_reflink_v: + case KEY_TYPE_indirect_inline_data: + return true; + default: + return false; + } +} + static inline unsigned bkey_type_to_indirect(const struct bkey *k) { switch (k->type) { @@ -30,15 +41,15 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k) /* reflink pointers */ int bch2_reflink_p_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); int ret = 0; - bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad), + bkey_fsck_err_on(REFLINK_P_IDX(p.v) < le32_to_cpu(p.v->front_pad), c, reflink_p_front_pad_bad, "idx < front_pad (%llu < %u)", - le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad)); + REFLINK_P_IDX(p.v), le32_to_cpu(p.v->front_pad)); fsck_err: return ret; } @@ -49,7 +60,7 @@ void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); prt_printf(out, "idx %llu front_pad %u back_pad %u", - le64_to_cpu(p.v->idx), + REFLINK_P_IDX(p.v), le32_to_cpu(p.v->front_pad), le32_to_cpu(p.v->back_pad)); } @@ -65,49 +76,250 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r */ return false; - if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx)) + if (REFLINK_P_IDX(l.v) + l.k->size != REFLINK_P_IDX(r.v)) + return false; + + if (REFLINK_P_ERROR(l.v) != REFLINK_P_ERROR(r.v)) return false; bch2_key_resize(l.k, l.k->size + r.k->size); return true; } +/* indirect extents */ + +int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k, + struct bkey_validate_context from) +{ + int ret = 0; + + bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, REFLINK_P_IDX_MAX)), + c, reflink_v_pos_bad, + "indirect extent above maximum 
position 0:%llu", + REFLINK_P_IDX_MAX); + + ret = bch2_bkey_ptrs_validate(c, k, from); +fsck_err: + return ret; +} + +void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, + struct bkey_s_c k) +{ + struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k); + + prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount)); + + bch2_bkey_ptrs_to_text(out, c, k); +} + +#if 0 +Currently disabled, needs to be debugged: + +bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) +{ + struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); + struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); + + return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); +} +#endif + +/* indirect inline data */ + +int bch2_indirect_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, + struct bkey_validate_context from) +{ + return 0; +} + +void bch2_indirect_inline_data_to_text(struct printbuf *out, + struct bch_fs *c, struct bkey_s_c k) +{ + struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k); + unsigned datalen = bkey_inline_data_bytes(k.k); + + prt_printf(out, "refcount %llu datalen %u: %*phN", + le64_to_cpu(d.v->refcount), datalen, + min(datalen, 32U), d.v->data); +} + +/* lookup */ + +static int bch2_indirect_extent_not_missing(struct btree_trans *trans, struct bkey_s_c_reflink_p p, + bool should_commit) +{ + struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p); + int ret = PTR_ERR_OR_ZERO(new); + if (ret) + return ret; + + SET_REFLINK_P_ERROR(&new->v, false); + ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun); + if (ret) + return ret; + + if (!should_commit) + return 0; + + return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; +} + +static int bch2_indirect_extent_missing_error(struct btree_trans *trans, + struct bkey_s_c_reflink_p p, + u64 missing_start, u64 missing_end, + bool should_commit) +{ + if (REFLINK_P_ERROR(p.v)) + return -BCH_ERR_missing_indirect_extent; + + struct bch_fs *c = trans->c; + u64 live_start = REFLINK_P_IDX(p.v); + u64 live_end = REFLINK_P_IDX(p.v) + p.k->size; + u64 refd_start = live_start - le32_to_cpu(p.v->front_pad); + u64 refd_end = live_end + le32_to_cpu(p.v->back_pad); + struct printbuf buf = PRINTBUF; + int ret = 0; + + BUG_ON(missing_start < refd_start); + BUG_ON(missing_end > refd_end); + + if (fsck_err(trans, reflink_p_to_missing_reflink_v, + "pointer to missing indirect extent\n" + " %s\n" + " missing range %llu-%llu", + (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), + missing_start, missing_end)) { + struct bkey_i_reflink_p *new = bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto err; + + /* + * Is the missing range not actually needed? + * + * p.v->idx refers to the data that we actually want, but if the + * indirect extent we point to was bigger, front_pad and back_pad + * indicate the range we took a reference on. 
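The pad arithmetic that comment describes is easy to get backwards, so here is a small stand-alone sketch of the ranges involved, using plain integers and an illustrative handle_missing() helper rather than the real bkey plumbing: the live range is what the pointer actually reads, the padded range is what it holds references on, and a missing indirect extent wholly outside the live range can be dropped by narrowing a pad.

/* Sketch of reflink_p range bookkeeping.
 * live range : idx .. idx + size            (data actually read)
 * refd range : live range widened by front_pad / back_pad (refcounted)
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_reflink_p {
	uint64_t idx;		/* start of live range in the reflink btree */
	uint64_t size;
	uint32_t front_pad;
	uint32_t back_pad;
};

static void handle_missing(struct sketch_reflink_p *p,
			   uint64_t missing_start, uint64_t missing_end)
{
	uint64_t live_start = p->idx;
	uint64_t live_end   = p->idx + p->size;

	if (missing_end <= live_start)		/* only front padding is gone */
		p->front_pad = live_start - missing_end;
	else if (missing_start >= live_end)	/* only back padding is gone */
		p->back_pad = missing_start - live_end;
	else
		printf("missing range overlaps live data: real error\n");
}

int main(void)
{
	struct sketch_reflink_p p = {
		.idx = 100, .size = 50, .front_pad = 20, .back_pad = 30,
	};

	printf("referenced range: %llu-%llu, live range: %llu-%llu\n",
	       (unsigned long long)(p.idx - p.front_pad),
	       (unsigned long long)(p.idx + p.size + p.back_pad),
	       (unsigned long long)p.idx,
	       (unsigned long long)(p.idx + p.size));

	/* indirect extent covering 80..95 is missing: entirely in the front pad */
	handle_missing(&p, 80, 95);
	printf("front_pad narrowed to %u\n", (unsigned)p.front_pad);
	return 0;
}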
+ */ + + if (missing_end <= live_start) { + new->v.front_pad = cpu_to_le32(live_start - missing_end); + } else if (missing_start >= live_end) { + new->v.back_pad = cpu_to_le32(missing_start - live_end); + } else { + struct bpos new_start = bkey_start_pos(&new->k); + struct bpos new_end = new->k.p; + + if (missing_start > live_start) + new_start.offset += missing_start - live_start; + if (missing_end < live_end) + new_end.offset -= live_end - missing_end; + + bch2_cut_front(new_start, &new->k_i); + bch2_cut_back(new_end, &new->k_i); + + SET_REFLINK_P_ERROR(&new->v, true); + } + + ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun); + if (ret) + goto err; + + if (should_commit) + ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; + } +err: +fsck_err: + printbuf_exit(&buf); + return ret; +} + +/* + * This is used from the read path, which doesn't expect to have to do a + * transaction commit, and from triggers, which should not be doing a commit: + */ +struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *trans, + struct btree_iter *iter, + s64 *offset_into_extent, + struct bkey_s_c_reflink_p p, + bool should_commit, + unsigned iter_flags) +{ + BUG_ON(*offset_into_extent < -((s64) le32_to_cpu(p.v->front_pad))); + BUG_ON(*offset_into_extent >= p.k->size + le32_to_cpu(p.v->back_pad)); + + u64 reflink_offset = REFLINK_P_IDX(p.v) + *offset_into_extent; + + struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_reflink, + POS(0, reflink_offset), iter_flags); + if (bkey_err(k)) + return k; + + if (unlikely(!bkey_extent_is_reflink_data(k.k))) { + bch2_trans_iter_exit(trans, iter); + + unsigned size = min((u64) k.k->size, + REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad) - + reflink_offset); + bch2_key_resize(&iter->k, size); + + int ret = bch2_indirect_extent_missing_error(trans, p, reflink_offset, + k.k->p.offset, should_commit); + if (ret) + return bkey_s_c_err(ret); + } else if (unlikely(REFLINK_P_ERROR(p.v))) { + bch2_trans_iter_exit(trans, iter); + + int ret = bch2_indirect_extent_not_missing(trans, p, should_commit); + if (ret) + return bkey_s_c_err(ret); + } + + *offset_into_extent = reflink_offset - bkey_start_offset(k.k); + return k; +} + +/* reflink pointer trigger */ + static int trans_trigger_reflink_p_segment(struct btree_trans *trans, struct bkey_s_c_reflink_p p, u64 *idx, enum btree_iter_update_trigger_flags flags) { struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_i *k; - __le64 *refcount; - int add = !(flags & BTREE_TRIGGER_overwrite) ? 
1 : -1; struct printbuf buf = PRINTBUF; - int ret; - k = bch2_bkey_get_mut_noupdate(trans, &iter, - BTREE_ID_reflink, POS(0, *idx), - BTREE_ITER_with_updates); - ret = PTR_ERR_OR_ZERO(k); + s64 offset_into_extent = *idx - REFLINK_P_IDX(p.v); + struct btree_iter iter; + struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter, &offset_into_extent, p, false, + BTREE_ITER_intent| + BTREE_ITER_with_updates); + int ret = bkey_err(k); if (ret) - goto err; + return ret; - refcount = bkey_refcount(bkey_i_to_s(k)); - if (!refcount) { - bch2_bkey_val_to_text(&buf, c, p.s_c); - bch2_trans_inconsistent(trans, - "nonexistent indirect extent at %llu while marking\n %s", - *idx, buf.buf); - ret = -EIO; - goto err; + if (bkey_deleted(k.k)) { + if (!(flags & BTREE_TRIGGER_overwrite)) + ret = -BCH_ERR_missing_indirect_extent; + goto next; } + struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto err; + + __le64 *refcount = bkey_refcount(bkey_i_to_s(new)); if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) { bch2_bkey_val_to_text(&buf, c, p.s_c); - bch2_trans_inconsistent(trans, - "indirect extent refcount underflow at %llu while marking\n %s", - *idx, buf.buf); - ret = -EIO; - goto err; + prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + log_fsck_err(trans, reflink_refcount_underflow, + "indirect extent refcount underflow while marking\n %s", + buf.buf); + goto next; } if (flags & BTREE_TRIGGER_insert) { @@ -115,25 +327,26 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans, u64 pad; pad = max_t(s64, le32_to_cpu(v->front_pad), - le64_to_cpu(v->idx) - bkey_start_offset(&k->k)); + REFLINK_P_IDX(v) - bkey_start_offset(&new->k)); BUG_ON(pad > U32_MAX); v->front_pad = cpu_to_le32(pad); pad = max_t(s64, le32_to_cpu(v->back_pad), - k->k.p.offset - p.k->size - le64_to_cpu(v->idx)); + new->k.p.offset - p.k->size - REFLINK_P_IDX(v)); BUG_ON(pad > U32_MAX); v->back_pad = cpu_to_le32(pad); } - le64_add_cpu(refcount, add); + le64_add_cpu(refcount, !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1); bch2_btree_iter_set_pos_to_extent_start(&iter); - ret = bch2_trans_update(trans, &iter, k, 0); + ret = bch2_trans_update(trans, &iter, new, 0); if (ret) goto err; - - *idx = k->k.p.offset; +next: + *idx = k.k->p.offset; err: +fsck_err: bch2_trans_iter_exit(trans, &iter); printbuf_exit(&buf); return ret; @@ -147,9 +360,7 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, struct bch_fs *c = trans->c; struct reflink_gc *r; int add = !(flags & BTREE_TRIGGER_overwrite) ? 
1 : -1; - u64 start = le64_to_cpu(p.v->idx); - u64 end = le64_to_cpu(p.v->idx) + p.k->size; - u64 next_idx = end + le32_to_cpu(p.v->back_pad); + u64 next_idx = REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad); s64 ret = 0; struct printbuf buf = PRINTBUF; @@ -168,36 +379,16 @@ static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans, *idx = r->offset; return 0; not_found: - BUG_ON(!(flags & BTREE_TRIGGER_check_repair)); - - if (fsck_err(trans, reflink_p_to_missing_reflink_v, - "pointer to missing indirect extent\n" - " %s\n" - " missing range %llu-%llu", - (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), - *idx, next_idx)) { - struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, p.s_c); - ret = PTR_ERR_OR_ZERO(update); + if (flags & BTREE_TRIGGER_check_repair) { + ret = bch2_indirect_extent_missing_error(trans, p, *idx, next_idx, false); + if (ret == -BCH_ERR_missing_indirect_extent) + ret = 0; if (ret) goto err; - - if (next_idx <= start) { - bkey_i_to_reflink_p(update)->v.front_pad = cpu_to_le32(start - next_idx); - } else if (*idx >= end) { - bkey_i_to_reflink_p(update)->v.back_pad = cpu_to_le32(*idx - end); - } else { - bkey_error_init(update); - update->k.p = p.k->p; - update->k.size = p.k->size; - set_bkey_val_u64s(&update->k, 0); - } - - ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun); } *idx = next_idx; err: -fsck_err: printbuf_exit(&buf); return ret; } @@ -210,8 +401,8 @@ static int __trigger_reflink_p(struct btree_trans *trans, struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k); int ret = 0; - u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad); - u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad); + u64 idx = REFLINK_P_IDX(p.v) - le32_to_cpu(p.v->front_pad); + u64 end = REFLINK_P_IDX(p.v) + p.k->size + le32_to_cpu(p.v->back_pad); if (flags & BTREE_TRIGGER_transactional) { while (idx < end && !ret) @@ -253,35 +444,7 @@ int bch2_trigger_reflink_p(struct btree_trans *trans, return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags); } -/* indirect extents */ - -int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) -{ - return bch2_bkey_ptrs_validate(c, k, flags); -} - -void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c, - struct bkey_s_c k) -{ - struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k); - - prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount)); - - bch2_bkey_ptrs_to_text(out, c, k); -} - -#if 0 -Currently disabled, needs to be debugged: - -bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r) -{ - struct bkey_s_reflink_v l = bkey_s_to_reflink_v(_l); - struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r); - - return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r); -} -#endif +/* indirect extent trigger */ static inline void check_indirect_extent_deleting(struct bkey_s new, @@ -307,25 +470,6 @@ int bch2_trigger_reflink_v(struct btree_trans *trans, return bch2_trigger_extent(trans, btree_id, level, old, new, flags); } -/* indirect inline data */ - -int bch2_indirect_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) -{ - return 0; -} - -void bch2_indirect_inline_data_to_text(struct printbuf *out, - struct bch_fs *c, struct bkey_s_c k) -{ - struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k); - unsigned datalen = bkey_inline_data_bytes(k.k); - - 
prt_printf(out, "refcount %llu datalen %u: %*phN", - le64_to_cpu(d.v->refcount), datalen, - min(datalen, 32U), d.v->data); -} - int bch2_trigger_indirect_inline_data(struct btree_trans *trans, enum btree_id btree_id, unsigned level, struct bkey_s_c old, struct bkey_s new, @@ -336,9 +480,12 @@ int bch2_trigger_indirect_inline_data(struct btree_trans *trans, return 0; } +/* create */ + static int bch2_make_extent_indirect(struct btree_trans *trans, struct btree_iter *extent_iter, - struct bkey_i *orig) + struct bkey_i *orig, + bool reflink_p_may_update_opts_field) { struct bch_fs *c = trans->c; struct btree_iter reflink_iter = { NULL }; @@ -358,6 +505,14 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, if (ret) goto err; + /* + * XXX: we're assuming that 56 bits will be enough for the life of the + * filesystem: we need to implement wraparound, with a cursor in the + * logged ops btree: + */ + if (bkey_ge(reflink_iter.pos, POS(0, REFLINK_P_IDX_MAX - orig->k.size))) + return -ENOSPC; + r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k)); ret = PTR_ERR_OR_ZERO(r_v); if (ret) @@ -394,7 +549,10 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, memset(&r_p->v, 0, sizeof(r_p->v)); #endif - r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k)); + SET_REFLINK_P_IDX(&r_p->v, bkey_start_offset(&r_v->k)); + + if (reflink_p_may_update_opts_field) + SET_REFLINK_P_MAY_UPDATE_OPTIONS(&r_p->v, true); ret = bch2_trans_update(trans, extent_iter, &r_p->k_i, BTREE_UPDATE_internal_snapshot_node); @@ -409,7 +567,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end) struct bkey_s_c k; int ret; - for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) { + for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) { if (bkey_extent_is_unwritten(k)) continue; @@ -426,7 +584,8 @@ s64 bch2_remap_range(struct bch_fs *c, subvol_inum dst_inum, u64 dst_offset, subvol_inum src_inum, u64 src_offset, u64 remap_sectors, - u64 new_i_size, s64 *i_sectors_delta) + u64 new_i_size, s64 *i_sectors_delta, + bool may_change_src_io_path_opts) { struct btree_trans *trans; struct btree_iter dst_iter, src_iter; @@ -439,6 +598,8 @@ s64 bch2_remap_range(struct bch_fs *c, struct bpos src_want; u64 dst_done = 0; u32 dst_snapshot, src_snapshot; + bool reflink_p_may_update_opts_field = + bch2_request_incompat_feature(c, bcachefs_metadata_version_reflink_p_may_update_opts); int ret = 0, ret2 = 0; if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink)) @@ -520,7 +681,8 @@ s64 bch2_remap_range(struct bch_fs *c, src_k = bkey_i_to_s_c(new_src.k); ret = bch2_make_extent_indirect(trans, &src_iter, - new_src.k); + new_src.k, + reflink_p_may_update_opts_field); if (ret) continue; @@ -533,11 +695,15 @@ s64 bch2_remap_range(struct bch_fs *c, struct bkey_i_reflink_p *dst_p = bkey_reflink_p_init(new_dst.k); - u64 offset = le64_to_cpu(src_p.v->idx) + + u64 offset = REFLINK_P_IDX(src_p.v) + (src_want.offset - bkey_start_offset(src_k.k)); - dst_p->v.idx = cpu_to_le64(offset); + SET_REFLINK_P_IDX(&dst_p->v, offset); + + if (reflink_p_may_update_opts_field && + may_change_src_io_path_opts) + SET_REFLINK_P_MAY_UPDATE_OPTIONS(&dst_p->v, true); } else { BUG(); } @@ -547,7 +713,7 @@ s64 bch2_remap_range(struct bch_fs *c, min(src_k.k->p.offset - src_want.offset, dst_end.offset - dst_iter.pos.offset)); - ret = bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?: + ret = bch2_bkey_set_needs_rebalance(c, &opts, new_dst.k) ?: bch2_extent_update(trans, dst_inum, &dst_iter, 
new_dst.k, &disk_res, new_i_size, i_sectors_delta, @@ -591,3 +757,97 @@ err: return dst_done ?: ret ?: ret2; } + +/* fsck */ + +static int bch2_gc_write_reflink_key(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k, + size_t *idx) +{ + struct bch_fs *c = trans->c; + const __le64 *refcount = bkey_refcount_c(k); + struct printbuf buf = PRINTBUF; + struct reflink_gc *r; + int ret = 0; + + if (!refcount) + return 0; + + while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) && + r->offset < k.k->p.offset) + ++*idx; + + if (!r || + r->offset != k.k->p.offset || + r->size != k.k->size) { + bch_err(c, "unexpected inconsistency walking reflink table at gc finish"); + return -EINVAL; + } + + if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), + trans, reflink_v_refcount_wrong, + "reflink key has wrong refcount:\n" + " %s\n" + " should be %u", + (bch2_bkey_val_to_text(&buf, c, k), buf.buf), + r->refcount)) { + struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + goto out; + + if (!r->refcount) + new->k.type = KEY_TYPE_deleted; + else + *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount); + ret = bch2_trans_update(trans, iter, new, 0); + } +out: +fsck_err: + printbuf_exit(&buf); + return ret; +} + +int bch2_gc_reflink_done(struct bch_fs *c) +{ + size_t idx = 0; + + int ret = bch2_trans_run(c, + for_each_btree_key_commit(trans, iter, + BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch, k, + NULL, NULL, BCH_TRANS_COMMIT_no_enospc, + bch2_gc_write_reflink_key(trans, &iter, k, &idx))); + c->reflink_gc_nr = 0; + return ret; +} + +int bch2_gc_reflink_start(struct bch_fs *c) +{ + c->reflink_gc_nr = 0; + + int ret = bch2_trans_run(c, + for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, + BTREE_ITER_prefetch, k, ({ + const __le64 *refcount = bkey_refcount_c(k); + + if (!refcount) + continue; + + struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table, + c->reflink_gc_nr++, GFP_KERNEL); + if (!r) { + ret = -BCH_ERR_ENOMEM_gc_reflink_start; + break; + } + + r->offset = k.k->p.offset; + r->size = k.k->size; + r->refcount = 0; + 0; + }))); + + bch_err_fn(c, ret); + return ret; +} diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h index 51afe11d8ed6..1632780bdf18 100644 --- a/fs/bcachefs/reflink.h +++ b/fs/bcachefs/reflink.h @@ -2,9 +2,8 @@ #ifndef _BCACHEFS_REFLINK_H #define _BCACHEFS_REFLINK_H -enum bch_validate_flags; - -int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned, @@ -19,7 +18,8 @@ int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned, .min_val_size = 16, \ }) -int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, @@ -34,7 +34,7 @@ int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned, }) int bch2_indirect_inline_data_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + 
struct bkey_validate_context); void bch2_indirect_inline_data_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_trigger_indirect_inline_data(struct btree_trans *, @@ -73,7 +73,15 @@ static inline __le64 *bkey_refcount(struct bkey_s k) } } +struct bkey_s_c bch2_lookup_indirect_extent(struct btree_trans *, struct btree_iter *, + s64 *, struct bkey_s_c_reflink_p, + bool, unsigned); + s64 bch2_remap_range(struct bch_fs *, subvol_inum, u64, - subvol_inum, u64, u64, u64, s64 *); + subvol_inum, u64, u64, u64, s64 *, + bool); + +int bch2_gc_reflink_done(struct bch_fs *); +int bch2_gc_reflink_start(struct bch_fs *); #endif /* _BCACHEFS_REFLINK_H */ diff --git a/fs/bcachefs/reflink_format.h b/fs/bcachefs/reflink_format.h index 6772eebb1fc6..92995e4f898e 100644 --- a/fs/bcachefs/reflink_format.h +++ b/fs/bcachefs/reflink_format.h @@ -4,7 +4,7 @@ struct bch_reflink_p { struct bch_val v; - __le64 idx; + __le64 idx_flags; /* * A reflink pointer might point to an indirect extent which is then * later split (by copygc or rebalance). If we only pointed to part of @@ -17,6 +17,11 @@ struct bch_reflink_p { __le32 back_pad; } __packed __aligned(8); +LE64_BITMASK(REFLINK_P_IDX, struct bch_reflink_p, idx_flags, 0, 56); +LE64_BITMASK(REFLINK_P_ERROR, struct bch_reflink_p, idx_flags, 56, 57); +LE64_BITMASK(REFLINK_P_MAY_UPDATE_OPTIONS, + struct bch_reflink_p, idx_flags, 57, 58); + struct bch_reflink_v { struct bch_val v; __le64 refcount; diff --git a/fs/bcachefs/sb-clean.c b/fs/bcachefs/sb-clean.c index 005275281804..59c8770e4a0e 100644 --- a/fs/bcachefs/sb-clean.c +++ b/fs/bcachefs/sb-clean.c @@ -23,6 +23,10 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean, int write) { + struct bkey_validate_context from = { + .flags = write, + .from = BKEY_VALIDATE_superblock, + }; struct jset_entry *entry; int ret; @@ -40,7 +44,7 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *cle ret = bch2_journal_entry_validate(c, NULL, entry, le16_to_cpu(c->disk_sb.sb->version), BCH_SB_BIG_ENDIAN(c->disk_sb.sb), - write); + from); if (ret) return ret; } diff --git a/fs/bcachefs/sb-counters_format.h b/fs/bcachefs/sb-counters_format.h index 62ea478215d0..fdcf598f08b1 100644 --- a/fs/bcachefs/sb-counters_format.h +++ b/fs/bcachefs/sb-counters_format.h @@ -2,86 +2,91 @@ #ifndef _BCACHEFS_SB_COUNTERS_FORMAT_H #define _BCACHEFS_SB_COUNTERS_FORMAT_H -#define BCH_PERSISTENT_COUNTERS() \ - x(io_read, 0) \ - x(io_write, 1) \ - x(io_move, 2) \ - x(bucket_invalidate, 3) \ - x(bucket_discard, 4) \ - x(bucket_alloc, 5) \ - x(bucket_alloc_fail, 6) \ - x(btree_cache_scan, 7) \ - x(btree_cache_reap, 8) \ - x(btree_cache_cannibalize, 9) \ - x(btree_cache_cannibalize_lock, 10) \ - x(btree_cache_cannibalize_lock_fail, 11) \ - x(btree_cache_cannibalize_unlock, 12) \ - x(btree_node_write, 13) \ - x(btree_node_read, 14) \ - x(btree_node_compact, 15) \ - x(btree_node_merge, 16) \ - x(btree_node_split, 17) \ - x(btree_node_rewrite, 18) \ - x(btree_node_alloc, 19) \ - x(btree_node_free, 20) \ - x(btree_node_set_root, 21) \ - x(btree_path_relock_fail, 22) \ - x(btree_path_upgrade_fail, 23) \ - x(btree_reserve_get_fail, 24) \ - x(journal_entry_full, 25) \ - x(journal_full, 26) \ - x(journal_reclaim_finish, 27) \ - x(journal_reclaim_start, 28) \ - x(journal_write, 29) \ - x(read_promote, 30) \ - x(read_bounce, 31) \ - x(read_split, 33) \ - x(read_retry, 32) \ - x(read_reuse_race, 34) \ - x(move_extent_read, 35) \ - x(move_extent_write, 36) \ - x(move_extent_finish, 37) \ - 
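The REFLINK_P_IDX()/REFLINK_P_ERROR()/REFLINK_P_MAY_UPDATE_OPTIONS() accessors declared in reflink_format.h above pack a 56-bit index and two flag bits into the single idx_flags word. A host-endian sketch of that packing, using plain shifts and illustrative helper names instead of the LE64_BITMASK() machinery (the real on-disk field is __le64):

/* Sketch only: 56-bit index in bits 0-55, flags in bits 56 and 57. */
#include <stdint.h>
#include <stdio.h>

#define IDX_BITS	56
#define IDX_MASK	((UINT64_C(1) << IDX_BITS) - 1)
#define ERROR_BIT	(UINT64_C(1) << 56)
#define MAY_UPDATE_BIT	(UINT64_C(1) << 57)

static uint64_t get_idx(uint64_t idx_flags)   { return idx_flags & IDX_MASK; }
static int      get_error(uint64_t idx_flags) { return !!(idx_flags & ERROR_BIT); }

static void set_idx(uint64_t *idx_flags, uint64_t idx)
{
	*idx_flags = (*idx_flags & ~IDX_MASK) | (idx & IDX_MASK);
}

int main(void)
{
	uint64_t idx_flags = 0;

	set_idx(&idx_flags, 123456);
	idx_flags |= MAY_UPDATE_BIT;

	printf("idx=%llu error=%d may_update=%d\n",
	       (unsigned long long)get_idx(idx_flags),
	       get_error(idx_flags),
	       !!(idx_flags & MAY_UPDATE_BIT));
	return 0;
}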
x(move_extent_fail, 38) \ - x(move_extent_start_fail, 39) \ - x(copygc, 40) \ - x(copygc_wait, 41) \ - x(gc_gens_end, 42) \ - x(gc_gens_start, 43) \ - x(trans_blocked_journal_reclaim, 44) \ - x(trans_restart_btree_node_reused, 45) \ - x(trans_restart_btree_node_split, 46) \ - x(trans_restart_fault_inject, 47) \ - x(trans_restart_iter_upgrade, 48) \ - x(trans_restart_journal_preres_get, 49) \ - x(trans_restart_journal_reclaim, 50) \ - x(trans_restart_journal_res_get, 51) \ - x(trans_restart_key_cache_key_realloced, 52) \ - x(trans_restart_key_cache_raced, 53) \ - x(trans_restart_mark_replicas, 54) \ - x(trans_restart_mem_realloced, 55) \ - x(trans_restart_memory_allocation_failure, 56) \ - x(trans_restart_relock, 57) \ - x(trans_restart_relock_after_fill, 58) \ - x(trans_restart_relock_key_cache_fill, 59) \ - x(trans_restart_relock_next_node, 60) \ - x(trans_restart_relock_parent_for_fill, 61) \ - x(trans_restart_relock_path, 62) \ - x(trans_restart_relock_path_intent, 63) \ - x(trans_restart_too_many_iters, 64) \ - x(trans_restart_traverse, 65) \ - x(trans_restart_upgrade, 66) \ - x(trans_restart_would_deadlock, 67) \ - x(trans_restart_would_deadlock_write, 68) \ - x(trans_restart_injected, 69) \ - x(trans_restart_key_cache_upgrade, 70) \ - x(trans_traverse_all, 71) \ - x(transaction_commit, 72) \ - x(write_super, 73) \ - x(trans_restart_would_deadlock_recursion_limit, 74) \ - x(trans_restart_write_buffer_flush, 75) \ - x(trans_restart_split_race, 76) \ - x(write_buffer_flush_slowpath, 77) \ - x(write_buffer_flush_sync, 78) +enum counters_flags { + TYPE_COUNTER = BIT(0), /* event counters */ + TYPE_SECTORS = BIT(1), /* amount counters, the unit is sectors */ +}; + +#define BCH_PERSISTENT_COUNTERS() \ + x(io_read, 0, TYPE_SECTORS) \ + x(io_write, 1, TYPE_SECTORS) \ + x(io_move, 2, TYPE_SECTORS) \ + x(bucket_invalidate, 3, TYPE_COUNTER) \ + x(bucket_discard, 4, TYPE_COUNTER) \ + x(bucket_alloc, 5, TYPE_COUNTER) \ + x(bucket_alloc_fail, 6, TYPE_COUNTER) \ + x(btree_cache_scan, 7, TYPE_COUNTER) \ + x(btree_cache_reap, 8, TYPE_COUNTER) \ + x(btree_cache_cannibalize, 9, TYPE_COUNTER) \ + x(btree_cache_cannibalize_lock, 10, TYPE_COUNTER) \ + x(btree_cache_cannibalize_lock_fail, 11, TYPE_COUNTER) \ + x(btree_cache_cannibalize_unlock, 12, TYPE_COUNTER) \ + x(btree_node_write, 13, TYPE_COUNTER) \ + x(btree_node_read, 14, TYPE_COUNTER) \ + x(btree_node_compact, 15, TYPE_COUNTER) \ + x(btree_node_merge, 16, TYPE_COUNTER) \ + x(btree_node_split, 17, TYPE_COUNTER) \ + x(btree_node_rewrite, 18, TYPE_COUNTER) \ + x(btree_node_alloc, 19, TYPE_COUNTER) \ + x(btree_node_free, 20, TYPE_COUNTER) \ + x(btree_node_set_root, 21, TYPE_COUNTER) \ + x(btree_path_relock_fail, 22, TYPE_COUNTER) \ + x(btree_path_upgrade_fail, 23, TYPE_COUNTER) \ + x(btree_reserve_get_fail, 24, TYPE_COUNTER) \ + x(journal_entry_full, 25, TYPE_COUNTER) \ + x(journal_full, 26, TYPE_COUNTER) \ + x(journal_reclaim_finish, 27, TYPE_COUNTER) \ + x(journal_reclaim_start, 28, TYPE_COUNTER) \ + x(journal_write, 29, TYPE_COUNTER) \ + x(read_promote, 30, TYPE_COUNTER) \ + x(read_bounce, 31, TYPE_COUNTER) \ + x(read_split, 33, TYPE_COUNTER) \ + x(read_retry, 32, TYPE_COUNTER) \ + x(read_reuse_race, 34, TYPE_COUNTER) \ + x(move_extent_read, 35, TYPE_SECTORS) \ + x(move_extent_write, 36, TYPE_SECTORS) \ + x(move_extent_finish, 37, TYPE_SECTORS) \ + x(move_extent_fail, 38, TYPE_COUNTER) \ + x(move_extent_start_fail, 39, TYPE_COUNTER) \ + x(copygc, 40, TYPE_COUNTER) \ + x(copygc_wait, 41, TYPE_COUNTER) \ + x(gc_gens_end, 42, TYPE_COUNTER) \ + 
x(gc_gens_start, 43, TYPE_COUNTER) \ + x(trans_blocked_journal_reclaim, 44, TYPE_COUNTER) \ + x(trans_restart_btree_node_reused, 45, TYPE_COUNTER) \ + x(trans_restart_btree_node_split, 46, TYPE_COUNTER) \ + x(trans_restart_fault_inject, 47, TYPE_COUNTER) \ + x(trans_restart_iter_upgrade, 48, TYPE_COUNTER) \ + x(trans_restart_journal_preres_get, 49, TYPE_COUNTER) \ + x(trans_restart_journal_reclaim, 50, TYPE_COUNTER) \ + x(trans_restart_journal_res_get, 51, TYPE_COUNTER) \ + x(trans_restart_key_cache_key_realloced, 52, TYPE_COUNTER) \ + x(trans_restart_key_cache_raced, 53, TYPE_COUNTER) \ + x(trans_restart_mark_replicas, 54, TYPE_COUNTER) \ + x(trans_restart_mem_realloced, 55, TYPE_COUNTER) \ + x(trans_restart_memory_allocation_failure, 56, TYPE_COUNTER) \ + x(trans_restart_relock, 57, TYPE_COUNTER) \ + x(trans_restart_relock_after_fill, 58, TYPE_COUNTER) \ + x(trans_restart_relock_key_cache_fill, 59, TYPE_COUNTER) \ + x(trans_restart_relock_next_node, 60, TYPE_COUNTER) \ + x(trans_restart_relock_parent_for_fill, 61, TYPE_COUNTER) \ + x(trans_restart_relock_path, 62, TYPE_COUNTER) \ + x(trans_restart_relock_path_intent, 63, TYPE_COUNTER) \ + x(trans_restart_too_many_iters, 64, TYPE_COUNTER) \ + x(trans_restart_traverse, 65, TYPE_COUNTER) \ + x(trans_restart_upgrade, 66, TYPE_COUNTER) \ + x(trans_restart_would_deadlock, 67, TYPE_COUNTER) \ + x(trans_restart_would_deadlock_write, 68, TYPE_COUNTER) \ + x(trans_restart_injected, 69, TYPE_COUNTER) \ + x(trans_restart_key_cache_upgrade, 70, TYPE_COUNTER) \ + x(trans_traverse_all, 71, TYPE_COUNTER) \ + x(transaction_commit, 72, TYPE_COUNTER) \ + x(write_super, 73, TYPE_COUNTER) \ + x(trans_restart_would_deadlock_recursion_limit, 74, TYPE_COUNTER) \ + x(trans_restart_write_buffer_flush, 75, TYPE_COUNTER) \ + x(trans_restart_split_race, 76, TYPE_COUNTER) \ + x(write_buffer_flush_slowpath, 77, TYPE_COUNTER) \ + x(write_buffer_flush_sync, 78, TYPE_COUNTER) enum bch_persistent_counters { #define x(t, n, ...) 
BCH_COUNTER_##t, diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c index 8767c33c2b51..14f6b6a5fb38 100644 --- a/fs/bcachefs/sb-downgrade.c +++ b/fs/bcachefs/sb-downgrade.c @@ -81,7 +81,19 @@ BCH_FSCK_ERR_accounting_mismatch) \ x(inode_has_child_snapshots, \ BIT_ULL(BCH_RECOVERY_PASS_check_inodes), \ - BCH_FSCK_ERR_inode_has_child_snapshots_wrong) + BCH_FSCK_ERR_inode_has_child_snapshots_wrong) \ + x(backpointer_bucket_gen, \ + BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\ + BCH_FSCK_ERR_backpointer_to_missing_ptr, \ + BCH_FSCK_ERR_ptr_to_missing_backpointer) \ + x(disk_accounting_big_endian, \ + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ + BCH_FSCK_ERR_accounting_mismatch, \ + BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ + BCH_FSCK_ERR_accounting_key_junk_at_end) \ + x(directory_size, \ + BIT_ULL(BCH_RECOVERY_PASS_check_inodes), \ + BCH_FSCK_ERR_directory_size_mismatch) \ #define DOWNGRADE_TABLE() \ x(bucket_stripe_sectors, \ @@ -117,7 +129,19 @@ BCH_FSCK_ERR_bkey_version_in_future) \ x(rebalance_work_acct_fix, \ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ - BCH_FSCK_ERR_accounting_mismatch) + BCH_FSCK_ERR_accounting_mismatch, \ + BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ + BCH_FSCK_ERR_accounting_key_junk_at_end) \ + x(backpointer_bucket_gen, \ + BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers),\ + BCH_FSCK_ERR_backpointer_bucket_offset_wrong, \ + BCH_FSCK_ERR_backpointer_to_missing_ptr, \ + BCH_FSCK_ERR_ptr_to_missing_backpointer) \ + x(disk_accounting_big_endian, \ + BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \ + BCH_FSCK_ERR_accounting_mismatch, \ + BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \ + BCH_FSCK_ERR_accounting_key_junk_at_end) struct upgrade_downgrade_entry { u64 recovery_passes; diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h index 9feb6739f77a..b86ec013d7d7 100644 --- a/fs/bcachefs/sb-errors_format.h +++ b/fs/bcachefs/sb-errors_format.h @@ -5,9 +5,8 @@ enum bch_fsck_flags { FSCK_CAN_FIX = 1 << 0, FSCK_CAN_IGNORE = 1 << 1, - FSCK_NEED_FSCK = 1 << 2, - FSCK_NO_RATELIMIT = 1 << 3, - FSCK_AUTOFIX = 1 << 4, + FSCK_NO_RATELIMIT = 1 << 2, + FSCK_AUTOFIX = 1 << 3, }; #define BCH_SB_ERRS() \ @@ -58,8 +57,8 @@ enum bch_fsck_flags { x(bset_wrong_sector_offset, 44, 0) \ x(bset_empty, 45, 0) \ x(bset_bad_seq, 46, 0) \ - x(bset_blacklisted_journal_seq, 47, 0) \ - x(first_bset_blacklisted_journal_seq, 48, 0) \ + x(bset_blacklisted_journal_seq, 47, FSCK_AUTOFIX) \ + x(first_bset_blacklisted_journal_seq, 48, FSCK_AUTOFIX) \ x(btree_node_bad_btree, 49, 0) \ x(btree_node_bad_level, 50, 0) \ x(btree_node_bad_min_key, 51, 0) \ @@ -68,17 +67,17 @@ enum bch_fsck_flags { x(btree_node_bkey_past_bset_end, 54, 0) \ x(btree_node_bkey_bad_format, 55, 0) \ x(btree_node_bad_bkey, 56, 0) \ - x(btree_node_bkey_out_of_order, 57, 0) \ - x(btree_root_bkey_invalid, 58, 0) \ - x(btree_root_read_error, 59, 0) \ + x(btree_node_bkey_out_of_order, 57, FSCK_AUTOFIX) \ + x(btree_root_bkey_invalid, 58, FSCK_AUTOFIX) \ + x(btree_root_read_error, 59, FSCK_AUTOFIX) \ x(btree_root_bad_min_key, 60, 0) \ x(btree_root_bad_max_key, 61, 0) \ - x(btree_node_read_error, 62, 0) \ - x(btree_node_topology_bad_min_key, 63, 0) \ - x(btree_node_topology_bad_max_key, 64, 0) \ - x(btree_node_topology_overwritten_by_prev_node, 65, 0) \ - x(btree_node_topology_overwritten_by_next_node, 66, 0) \ - x(btree_node_topology_interior_node_empty, 67, 0) \ + x(btree_node_read_error, 62, FSCK_AUTOFIX) \ + x(btree_node_topology_bad_min_key, 63, 
FSCK_AUTOFIX) \ + x(btree_node_topology_bad_max_key, 64, FSCK_AUTOFIX) \ + x(btree_node_topology_overwritten_by_prev_node, 65, FSCK_AUTOFIX) \ + x(btree_node_topology_overwritten_by_next_node, 66, FSCK_AUTOFIX) \ + x(btree_node_topology_interior_node_empty, 67, FSCK_AUTOFIX) \ x(fs_usage_hidden_wrong, 68, FSCK_AUTOFIX) \ x(fs_usage_btree_wrong, 69, FSCK_AUTOFIX) \ x(fs_usage_data_wrong, 70, FSCK_AUTOFIX) \ @@ -123,11 +122,12 @@ enum bch_fsck_flags { x(alloc_key_cached_sectors_wrong, 109, FSCK_AUTOFIX) \ x(alloc_key_stripe_wrong, 110, FSCK_AUTOFIX) \ x(alloc_key_stripe_redundancy_wrong, 111, FSCK_AUTOFIX) \ + x(alloc_key_journal_seq_in_future, 298, FSCK_AUTOFIX) \ x(bucket_sector_count_overflow, 112, 0) \ x(bucket_metadata_type_mismatch, 113, 0) \ - x(need_discard_key_wrong, 114, 0) \ - x(freespace_key_wrong, 115, 0) \ - x(freespace_hole_missing, 116, 0) \ + x(need_discard_key_wrong, 114, FSCK_AUTOFIX) \ + x(freespace_key_wrong, 115, FSCK_AUTOFIX) \ + x(freespace_hole_missing, 116, FSCK_AUTOFIX) \ x(bucket_gens_val_size_bad, 117, 0) \ x(bucket_gens_key_wrong, 118, FSCK_AUTOFIX) \ x(bucket_gens_hole_wrong, 119, FSCK_AUTOFIX) \ @@ -139,9 +139,10 @@ enum bch_fsck_flags { x(discarding_bucket_not_in_need_discard_btree, 291, 0) \ x(backpointer_bucket_offset_wrong, 125, 0) \ x(backpointer_level_bad, 294, 0) \ - x(backpointer_to_missing_device, 126, 0) \ - x(backpointer_to_missing_alloc, 127, 0) \ - x(backpointer_to_missing_ptr, 128, 0) \ + x(backpointer_dev_bad, 297, 0) \ + x(backpointer_to_missing_device, 126, FSCK_AUTOFIX) \ + x(backpointer_to_missing_alloc, 127, FSCK_AUTOFIX) \ + x(backpointer_to_missing_ptr, 128, FSCK_AUTOFIX) \ x(lru_entry_at_time_0, 129, FSCK_AUTOFIX) \ x(lru_entry_to_invalid_bucket, 130, FSCK_AUTOFIX) \ x(lru_entry_bad, 131, FSCK_AUTOFIX) \ @@ -167,20 +168,21 @@ enum bch_fsck_flags { x(ptr_to_incorrect_stripe, 151, 0) \ x(ptr_gen_newer_than_bucket_gen, 152, 0) \ x(ptr_too_stale, 153, 0) \ - x(stale_dirty_ptr, 154, 0) \ + x(stale_dirty_ptr, 154, FSCK_AUTOFIX) \ x(ptr_bucket_data_type_mismatch, 155, 0) \ x(ptr_cached_and_erasure_coded, 156, 0) \ x(ptr_crc_uncompressed_size_too_small, 157, 0) \ + x(ptr_crc_uncompressed_size_too_big, 161, 0) \ + x(ptr_crc_uncompressed_size_mismatch, 300, 0) \ x(ptr_crc_csum_type_unknown, 158, 0) \ x(ptr_crc_compression_type_unknown, 159, 0) \ x(ptr_crc_redundant, 160, 0) \ - x(ptr_crc_uncompressed_size_too_big, 161, 0) \ x(ptr_crc_nonce_mismatch, 162, 0) \ x(ptr_stripe_redundant, 163, 0) \ x(reservation_key_nr_replicas_invalid, 164, 0) \ - x(reflink_v_refcount_wrong, 165, 0) \ + x(reflink_v_refcount_wrong, 165, FSCK_AUTOFIX) \ x(reflink_v_pos_bad, 292, 0) \ - x(reflink_p_to_missing_reflink_v, 166, 0) \ + x(reflink_p_to_missing_reflink_v, 166, FSCK_AUTOFIX) \ x(reflink_refcount_underflow, 293, 0) \ x(stripe_pos_bad, 167, 0) \ x(stripe_val_size_bad, 168, 0) \ @@ -209,6 +211,7 @@ enum bch_fsck_flags { x(bkey_in_missing_snapshot, 190, 0) \ x(inode_pos_inode_nonzero, 191, 0) \ x(inode_pos_blockdev_range, 192, 0) \ + x(inode_alloc_cursor_inode_bad, 301, 0) \ x(inode_unpack_error, 193, 0) \ x(inode_str_hash_invalid, 194, 0) \ x(inode_v3_fields_start_bad, 195, 0) \ @@ -232,6 +235,7 @@ enum bch_fsck_flags { x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \ x(inode_has_child_snapshots_wrong, 287, 0) \ x(inode_unreachable, 210, FSCK_AUTOFIX) \ + x(inode_journal_seq_in_future, 299, FSCK_AUTOFIX) \ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \ x(deleted_inode_missing, 212, FSCK_AUTOFIX) \ x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \ @@ -252,6 +256,7 @@ enum 
bch_fsck_flags { x(dirent_in_missing_dir_inode, 227, 0) \ x(dirent_in_non_dir_inode, 228, 0) \ x(dirent_to_missing_inode, 229, 0) \ + x(dirent_to_overwritten_inode, 302, 0) \ x(dirent_to_missing_subvol, 230, 0) \ x(dirent_to_itself, 231, 0) \ x(quota_type_invalid, 232, 0) \ @@ -288,7 +293,7 @@ enum bch_fsck_flags { x(btree_root_unreadable_and_scan_found_nothing, 263, 0) \ x(snapshot_node_missing, 264, 0) \ x(dup_backpointer_to_bad_csum_extent, 265, 0) \ - x(btree_bitmap_not_marked, 266, 0) \ + x(btree_bitmap_not_marked, 266, FSCK_AUTOFIX) \ x(sb_clean_entry_overrun, 267, 0) \ x(btree_ptr_v2_written_0, 268, 0) \ x(subvol_snapshot_bad, 269, 0) \ @@ -306,7 +311,10 @@ enum bch_fsck_flags { x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \ x(accounting_key_version_0, 282, FSCK_AUTOFIX) \ x(logged_op_but_clean, 283, FSCK_AUTOFIX) \ - x(MAX, 295, 0) + x(compression_opt_not_marked_in_sb, 295, FSCK_AUTOFIX) \ + x(compression_type_not_marked_in_sb, 296, FSCK_AUTOFIX) \ + x(directory_size_mismatch, 303, FSCK_AUTOFIX) \ + x(MAX, 304, 0) enum bch_sb_error_id { #define x(t, n, ...) BCH_FSCK_ERR_##t = n, diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 617d07e53b20..7e7c66a1e1a6 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -491,8 +491,12 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type, list_del(&wait->list); raw_spin_unlock(&lock->wait_lock); - if (unlikely(acquired)) + if (unlikely(acquired)) { do_six_unlock_type(lock, type); + } else if (type == SIX_LOCK_write) { + six_clear_bitmask(lock, SIX_LOCK_HELD_write); + six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read); + } break; } @@ -501,10 +505,6 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type, __set_current_state(TASK_RUNNING); out: - if (ret && type == SIX_LOCK_write) { - six_clear_bitmask(lock, SIX_LOCK_HELD_write); - six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read); - } trace_contention_end(lock, 0); return ret; @@ -616,8 +616,6 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long if (type != SIX_LOCK_write) six_release(&lock->dep_map, ip); - else - lock->seq++; if (type == SIX_LOCK_intent && lock->intent_lock_recurse) { @@ -625,6 +623,15 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long return; } + if (type == SIX_LOCK_write && + lock->write_lock_recurse) { + --lock->write_lock_recurse; + return; + } + + if (type == SIX_LOCK_write) + lock->seq++; + do_six_unlock_type(lock, type); } EXPORT_SYMBOL_GPL(six_unlock_ip); @@ -735,13 +742,13 @@ void six_lock_increment(struct six_lock *lock, enum six_lock_type type) atomic_add(l[type].lock_val, &lock->state); } break; + case SIX_LOCK_write: + lock->write_lock_recurse++; + fallthrough; case SIX_LOCK_intent: EBUG_ON(!(atomic_read(&lock->state) & SIX_LOCK_HELD_intent)); lock->intent_lock_recurse++; break; - case SIX_LOCK_write: - BUG(); - break; } } EXPORT_SYMBOL_GPL(six_lock_increment); diff --git a/fs/bcachefs/six.h b/fs/bcachefs/six.h index 68d46fd7f391..c142e06b7a3a 100644 --- a/fs/bcachefs/six.h +++ b/fs/bcachefs/six.h @@ -137,6 +137,7 @@ struct six_lock { atomic_t state; u32 seq; unsigned intent_lock_recurse; + unsigned write_lock_recurse; struct task_struct *owner; unsigned __percpu *readers; raw_spinlock_t wait_lock; diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c index ae57638506c3..c54091a28909 100644 --- a/fs/bcachefs/snapshot.c +++ b/fs/bcachefs/snapshot.c @@ -2,6 +2,7 @@ #include "bcachefs.h" #include 
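The six.c and six.h hunks above make write locks recursive: six_lock_increment() no longer BUG()s on SIX_LOCK_write but bumps the new write_lock_recurse counter, and six_unlock_ip() only advances lock->seq and really drops the lock once that counter has drained back to zero. A minimal single-threaded sketch of that counting, with illustrative names and none of the real lock's atomics, waiter handling or intent-lock bookkeeping:

/*
 * Toy model of the write_lock_recurse counting added above; single-threaded,
 * illustrative names only - not the real six lock.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_six_lock {
	bool		held_write;	/* stands in for SIX_LOCK_HELD_write */
	unsigned	write_recurse;	/* stands in for write_lock_recurse */
	unsigned	seq;		/* bumped when the write lock is finally dropped */
};

static void toy_write_lock(struct toy_six_lock *l)
{
	assert(!l->held_write);
	l->held_write = true;
}

/* like six_lock_increment(SIX_LOCK_write): re-take a lock already held */
static void toy_write_lock_increment(struct toy_six_lock *l)
{
	assert(l->held_write);
	l->write_recurse++;
}

/* like six_unlock(SIX_LOCK_write): only the outermost unlock releases */
static void toy_write_unlock(struct toy_six_lock *l)
{
	if (l->write_recurse) {
		l->write_recurse--;
		return;
	}
	l->seq++;
	l->held_write = false;
}

int main(void)
{
	struct toy_six_lock l = {};

	toy_write_lock(&l);
	toy_write_lock_increment(&l);	/* nested acquisition */
	toy_write_unlock(&l);		/* inner unlock: still held, seq unchanged */
	assert(l.held_write && l.seq == 0);
	toy_write_unlock(&l);		/* outer unlock: released, seq bumped */
	assert(!l.held_write && l.seq == 1);
	printf("seq after full release: %u\n", l.seq);
	return 0;
}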
"bkey_buf.h" +#include "btree_cache.h" #include "btree_key_cache.h" #include "btree_update.h" #include "buckets.h" @@ -32,7 +33,7 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c, } int bch2_snapshot_tree_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { int ret = 0; @@ -225,7 +226,7 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c, } int bch2_snapshot_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_snapshot s; u32 i, id; @@ -279,23 +280,6 @@ fsck_err: return ret; } -static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id) -{ - struct snapshot_t *t = snapshot_t_mut(c, id); - u32 parent = id; - - while ((parent = bch2_snapshot_parent_early(c, parent)) && - parent - id - 1 < IS_ANCESTOR_BITMAP) - __set_bit(parent - id - 1, t->is_ancestor); -} - -static void set_is_ancestor_bitmap(struct bch_fs *c, u32 id) -{ - mutex_lock(&c->snapshot_table_lock); - __set_is_ancestor_bitmap(c, id); - mutex_unlock(&c->snapshot_table_lock); -} - static int __bch2_mark_snapshot(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bkey_s_c old, struct bkey_s_c new, @@ -317,6 +301,7 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, if (new.k->type == KEY_TYPE_snapshot) { struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new); + t->live = true; t->parent = le32_to_cpu(s.v->parent); t->children[0] = le32_to_cpu(s.v->children[0]); t->children[1] = le32_to_cpu(s.v->children[1]); @@ -335,7 +320,11 @@ static int __bch2_mark_snapshot(struct btree_trans *trans, t->skip[2] = 0; } - __set_is_ancestor_bitmap(c, id); + u32 parent = id; + + while ((parent = bch2_snapshot_parent_early(c, parent)) && + parent - id - 1 < IS_ANCESTOR_BITMAP) + __set_bit(parent - id - 1, t->is_ancestor); if (BCH_SNAPSHOT_DELETED(s.v)) { set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); @@ -365,70 +354,6 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id, BTREE_ITER_with_updates, snapshot, s); } -static int bch2_snapshot_live(struct btree_trans *trans, u32 id) -{ - struct bch_snapshot v; - int ret; - - if (!id) - return 0; - - ret = bch2_snapshot_lookup(trans, id, &v); - if (bch2_err_matches(ret, ENOENT)) - bch_err(trans->c, "snapshot node %u not found", id); - if (ret) - return ret; - - return !BCH_SNAPSHOT_DELETED(&v); -} - -/* - * If @k is a snapshot with just one live child, it's part of a linear chain, - * which we consider to be an equivalence class: and then after snapshot - * deletion cleanup, there should only be a single key at a given position in - * this equivalence class. - * - * This sets the equivalence class of @k to be the child's equivalence class, if - * it's part of such a linear chain: this correctly sets equivalence classes on - * startup if we run leaf to root (i.e. in natural key order). 
- */ -static int bch2_snapshot_set_equiv(struct btree_trans *trans, struct bkey_s_c k) -{ - struct bch_fs *c = trans->c; - unsigned i, nr_live = 0, live_idx = 0; - struct bkey_s_c_snapshot snap; - u32 id = k.k->p.offset, child[2]; - - if (k.k->type != KEY_TYPE_snapshot) - return 0; - - snap = bkey_s_c_to_snapshot(k); - - child[0] = le32_to_cpu(snap.v->children[0]); - child[1] = le32_to_cpu(snap.v->children[1]); - - for (i = 0; i < 2; i++) { - int ret = bch2_snapshot_live(trans, child[i]); - - if (ret < 0) - return ret; - - if (ret) - live_idx = i; - nr_live += ret; - } - - mutex_lock(&c->snapshot_table_lock); - - snapshot_t_mut(c, id)->equiv = nr_live == 1 - ? snapshot_t_mut(c, child[live_idx])->equiv - : id; - - mutex_unlock(&c->snapshot_table_lock); - - return 0; -} - /* fsck: */ static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child) @@ -506,7 +431,6 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans, break; } } - bch2_trans_iter_exit(trans, &iter); if (!ret && !found) { @@ -536,6 +460,7 @@ static int check_snapshot_tree(struct btree_trans *trans, struct bch_snapshot s; struct bch_subvolume subvol; struct printbuf buf = PRINTBUF; + struct btree_iter snapshot_iter = {}; u32 root_id; int ret; @@ -545,22 +470,35 @@ static int check_snapshot_tree(struct btree_trans *trans, st = bkey_s_c_to_snapshot_tree(k); root_id = le32_to_cpu(st.v->root_snapshot); - ret = bch2_snapshot_lookup(trans, root_id, &s); + struct bkey_s_c_snapshot snapshot_k = + bch2_bkey_get_iter_typed(trans, &snapshot_iter, BTREE_ID_snapshots, + POS(0, root_id), 0, snapshot); + ret = bkey_err(snapshot_k); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; + if (!ret) + bkey_val_copy(&s, snapshot_k); + if (fsck_err_on(ret || root_id != bch2_snapshot_root(c, root_id) || st.k->p.offset != le32_to_cpu(s.tree), trans, snapshot_tree_to_missing_snapshot, "snapshot tree points to missing/incorrect snapshot:\n %s", - (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) { + (bch2_bkey_val_to_text(&buf, c, st.s_c), + prt_newline(&buf), + ret + ? prt_printf(&buf, "(%s)", bch2_err_str(ret)) + : bch2_bkey_val_to_text(&buf, c, snapshot_k.s_c), + buf.buf))) { ret = bch2_btree_delete_at(trans, iter, 0); goto err; } - ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol), - false, 0, &subvol); + if (!st.v->master_subvol) + goto out; + + ret = bch2_subvolume_get(trans, le32_to_cpu(st.v->master_subvol), false, &subvol); if (ret && !bch2_err_matches(ret, ENOENT)) goto err; @@ -603,8 +541,10 @@ static int check_snapshot_tree(struct btree_trans *trans, u->v.master_subvol = cpu_to_le32(subvol_id); st = snapshot_tree_i_to_s_c(u); } +out: err: fsck_err: + bch2_trans_iter_exit(trans, &snapshot_iter); printbuf_exit(&buf); return ret; } @@ -799,7 +739,7 @@ static int check_snapshot(struct btree_trans *trans, if (should_have_subvol) { id = le32_to_cpu(s.subvol); - ret = bch2_subvolume_get(trans, id, 0, false, &subvol); + ret = bch2_subvolume_get(trans, id, false, &subvol); if (bch2_err_matches(ret, ENOENT)) bch_err(c, "snapshot points to nonexistent subvolume:\n %s", (bch2_bkey_val_to_text(&buf, c, k), buf.buf)); @@ -902,7 +842,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) { struct bch_fs *c = trans->c; - if (bch2_snapshot_equiv(c, id)) + if (bch2_snapshot_exists(c, id)) return 0; /* Do we need to reconstruct the snapshot_tree entry as well? 
*/ @@ -951,8 +891,7 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id) return bch2_btree_insert_trans(trans, BTREE_ID_snapshots, &snapshot->k_i, 0) ?: bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, - bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0) ?: - bch2_snapshot_set_equiv(trans, bkey_i_to_s_c(&snapshot->k_i)); + bkey_s_c_null, bkey_i_to_s(&snapshot->k_i), 0); } /* Figure out which snapshot nodes belong in the same tree: */ @@ -1050,7 +989,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c) snapshot_id_list_to_text(&buf, t); darray_for_each(*t, id) { - if (fsck_err_on(!bch2_snapshot_equiv(c, *id), + if (fsck_err_on(!bch2_snapshot_exists(c, *id), trans, snapshot_node_missing, "snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) { if (t->nr > 1) { @@ -1083,10 +1022,12 @@ int bch2_check_key_has_snapshot(struct btree_trans *trans, struct printbuf buf = PRINTBUF; int ret = 0; - if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), + if (fsck_err_on(!bch2_snapshot_exists(c, k.k->p.snapshot), trans, bkey_in_missing_snapshot, "key in missing snapshot %s, delete?", - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) + (bch2_btree_id_to_text(&buf, iter->btree_id), + prt_char(&buf, ' '), + bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node) ?: 1; fsck_err: @@ -1100,13 +1041,11 @@ fsck_err: int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id) { struct btree_iter iter; - struct bkey_i_snapshot *s; - int ret = 0; - - s = bch2_bkey_get_mut_typed(trans, &iter, + struct bkey_i_snapshot *s = + bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id), 0, snapshot); - ret = PTR_ERR_OR_ZERO(s); + int ret = PTR_ERR_OR_ZERO(s); if (unlikely(ret)) { bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, "missing snapshot %u", id); @@ -1294,10 +1233,6 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree, goto err; new_snapids[i] = iter.pos.offset; - - mutex_lock(&c->snapshot_table_lock); - snapshot_t_mut(c, new_snapids[i])->equiv = new_snapids[i]; - mutex_unlock(&c->snapshot_table_lock); } err: bch2_trans_iter_exit(trans, &iter); @@ -1403,129 +1338,153 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent, * that key to snapshot leaf nodes, where we can mutate it */ -static int delete_dead_snapshots_process_key(struct btree_trans *trans, - struct btree_iter *iter, - struct bkey_s_c k, - snapshot_id_list *deleted, - snapshot_id_list *equiv_seen, - struct bpos *last_pos) +struct snapshot_interior_delete { + u32 id; + u32 live_child; +}; +typedef DARRAY(struct snapshot_interior_delete) interior_delete_list; + +static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id) { - int ret = bch2_check_key_has_snapshot(trans, iter, k); - if (ret) - return ret < 0 ? 
ret : 0; + darray_for_each(*l, i) + if (i->id == id) + return i->live_child; + return 0; +} - struct bch_fs *c = trans->c; - u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot); - if (!equiv) /* key for invalid snapshot node, but we chose not to delete */ +static unsigned __live_child(struct snapshot_table *t, u32 id, + snapshot_id_list *delete_leaves, + interior_delete_list *delete_interior) +{ + struct snapshot_t *s = __snapshot_t(t, id); + if (!s) return 0; - if (!bkey_eq(k.k->p, *last_pos)) - equiv_seen->nr = 0; + for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) + if (s->children[i] && + !snapshot_list_has_id(delete_leaves, s->children[i]) && + !interior_delete_has_id(delete_interior, s->children[i])) + return s->children[i]; - if (snapshot_list_has_id(deleted, k.k->p.snapshot)) - return bch2_btree_delete_at(trans, iter, - BTREE_UPDATE_internal_snapshot_node); + for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) { + u32 live_child = s->children[i] + ? __live_child(t, s->children[i], delete_leaves, delete_interior) + : 0; + if (live_child) + return live_child; + } - if (!bpos_eq(*last_pos, k.k->p) && - snapshot_list_has_id(equiv_seen, equiv)) - return bch2_btree_delete_at(trans, iter, - BTREE_UPDATE_internal_snapshot_node); + return 0; +} - *last_pos = k.k->p; +static unsigned live_child(struct bch_fs *c, u32 id, + snapshot_id_list *delete_leaves, + interior_delete_list *delete_interior) +{ + rcu_read_lock(); + u32 ret = __live_child(rcu_dereference(c->snapshots), id, + delete_leaves, delete_interior); + rcu_read_unlock(); + return ret; +} - ret = snapshot_list_add_nodup(c, equiv_seen, equiv); - if (ret) - return ret; +static int delete_dead_snapshots_process_key(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_s_c k, + snapshot_id_list *delete_leaves, + interior_delete_list *delete_interior) +{ + if (snapshot_list_has_id(delete_leaves, k.k->p.snapshot)) + return bch2_btree_delete_at(trans, iter, + BTREE_UPDATE_internal_snapshot_node); - /* - * When we have a linear chain of snapshot nodes, we consider - * those to form an equivalence class: we're going to collapse - * them all down to a single node, and keep the leaf-most node - - * which has the same id as the equivalence class id. - * - * If there are multiple keys in different snapshots at the same - * position, we're only going to keep the one in the newest - * snapshot (we delete the others above) - the rest have been - * overwritten and are redundant, and for the key we're going to keep we - * need to move it to the equivalance class ID if it's not there - * already. 
- */ - if (equiv != k.k->p.snapshot) { + u32 live_child = interior_delete_has_id(delete_interior, k.k->p.snapshot); + if (live_child) { struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k); int ret = PTR_ERR_OR_ZERO(new); if (ret) return ret; - new->k.p.snapshot = equiv; - - struct btree_iter new_iter; - bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p, - BTREE_ITER_all_snapshots| - BTREE_ITER_cached| - BTREE_ITER_intent); + new->k.p.snapshot = live_child; - ret = bch2_btree_iter_traverse(&new_iter) ?: - bch2_trans_update(trans, &new_iter, new, - BTREE_UPDATE_internal_snapshot_node) ?: - bch2_btree_delete_at(trans, iter, - BTREE_UPDATE_internal_snapshot_node); - bch2_trans_iter_exit(trans, &new_iter); + struct btree_iter dst_iter; + struct bkey_s_c dst_k = bch2_bkey_get_iter(trans, &dst_iter, + iter->btree_id, new->k.p, + BTREE_ITER_all_snapshots| + BTREE_ITER_intent); + ret = bkey_err(dst_k); if (ret) return ret; + + ret = (bkey_deleted(dst_k.k) + ? bch2_trans_update(trans, &dst_iter, new, + BTREE_UPDATE_internal_snapshot_node) + : 0) ?: + bch2_btree_delete_at(trans, iter, + BTREE_UPDATE_internal_snapshot_node); + bch2_trans_iter_exit(trans, &dst_iter); + return ret; } return 0; } -static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k) +/* + * For a given snapshot, if it doesn't have a subvolume that points to it, and + * it doesn't have child snapshot nodes - it's now redundant and we can mark it + * as deleted. + */ +static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s_c k, + snapshot_id_list *delete_leaves, + interior_delete_list *delete_interior) { - struct bkey_s_c_snapshot snap; - u32 children[2]; - int ret; - if (k.k->type != KEY_TYPE_snapshot) return 0; - snap = bkey_s_c_to_snapshot(k); - if (BCH_SNAPSHOT_DELETED(snap.v) || - BCH_SNAPSHOT_SUBVOL(snap.v)) + struct bch_fs *c = trans->c; + struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k); + unsigned live_children = 0; + + if (BCH_SNAPSHOT_SUBVOL(s.v)) return 0; - children[0] = le32_to_cpu(snap.v->children[0]); - children[1] = le32_to_cpu(snap.v->children[1]); + for (unsigned i = 0; i < 2; i++) { + u32 child = le32_to_cpu(s.v->children[i]); - ret = bch2_snapshot_live(trans, children[0]) ?: - bch2_snapshot_live(trans, children[1]); - if (ret < 0) - return ret; - return !ret; -} + live_children += child && + !snapshot_list_has_id(delete_leaves, child); + } -/* - * For a given snapshot, if it doesn't have a subvolume that points to it, and - * it doesn't have child snapshot nodes - it's now redundant and we can mark it - * as deleted. - */ -static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k) -{ - int ret = bch2_snapshot_needs_delete(trans, k); + if (live_children == 0) { + return snapshot_list_add(c, delete_leaves, s.k->p.offset); + } else if (live_children == 1) { + struct snapshot_interior_delete d = { + .id = s.k->p.offset, + .live_child = live_child(c, s.k->p.offset, delete_leaves, delete_interior), + }; + + if (!d.live_child) { + bch_err(c, "error finding live child of snapshot %u", d.id); + return -EINVAL; + } - return ret <= 0 - ? 
ret - : bch2_snapshot_node_set_deleted(trans, k.k->p.offset); + return darray_push(delete_interior, d); + } else { + return 0; + } } static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n, - snapshot_id_list *skip) + interior_delete_list *skip) { rcu_read_lock(); - while (snapshot_list_has_id(skip, id)) + while (interior_delete_has_id(skip, id)) id = __bch2_snapshot_parent(c, id); while (n--) { do { id = __bch2_snapshot_parent(c, id); - } while (snapshot_list_has_id(skip, id)); + } while (interior_delete_has_id(skip, id)); } rcu_read_unlock(); @@ -1534,7 +1493,7 @@ static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n, static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k, - snapshot_id_list *deleted) + interior_delete_list *deleted) { struct bch_fs *c = trans->c; u32 nr_deleted_ancestors = 0; @@ -1544,7 +1503,7 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, if (k.k->type != KEY_TYPE_snapshot) return 0; - if (snapshot_list_has_id(deleted, k.k->p.offset)) + if (interior_delete_has_id(deleted, k.k->p.offset)) return 0; s = bch2_bkey_make_mut_noupdate_typed(trans, k, snapshot); @@ -1553,7 +1512,7 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, return ret; darray_for_each(*deleted, i) - nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, *i); + nr_deleted_ancestors += bch2_snapshot_is_ancestor(c, s->k.p.offset, i->id); if (!nr_deleted_ancestors) return 0; @@ -1571,7 +1530,7 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, for (unsigned j = 0; j < ARRAY_SIZE(s->v.skip); j++) { u32 id = le32_to_cpu(s->v.skip[j]); - if (snapshot_list_has_id(deleted, id)) { + if (interior_delete_has_id(deleted, id)) { id = bch2_snapshot_nth_parent_skip(c, parent, depth > 1 @@ -1590,51 +1549,45 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans, int bch2_delete_dead_snapshots(struct bch_fs *c) { - struct btree_trans *trans; - snapshot_id_list deleted = { 0 }; - snapshot_id_list deleted_interior = { 0 }; - int ret = 0; - if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags)) return 0; - trans = bch2_trans_get(c); + struct btree_trans *trans = bch2_trans_get(c); + snapshot_id_list delete_leaves = {}; + interior_delete_list delete_interior = {}; + int ret = 0; /* * For every snapshot node: If we have no live children and it's not * pointed to by a subvolume, delete it: */ - ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, - NULL, NULL, 0, - bch2_delete_redundant_snapshot(trans, k)); - bch_err_msg(c, ret, "deleting redundant snapshots"); + ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, + check_should_delete_snapshot(trans, k, &delete_leaves, &delete_interior)); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "walking snapshots"); if (ret) goto err; - ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, - bch2_snapshot_set_equiv(trans, k)); - bch_err_msg(c, ret, "in bch2_snapshots_set_equiv"); - if (ret) + if (!delete_leaves.nr && !delete_interior.nr) goto err; - ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, ({ - if (k.k->type != KEY_TYPE_snapshot) - continue; + { + struct printbuf buf = PRINTBUF; + prt_printf(&buf, "deleting leaves"); + darray_for_each(delete_leaves, i) + prt_printf(&buf, " %u", *i); - BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v) - 
? snapshot_list_add(c, &deleted, k.k->p.offset) - : 0; - })); - bch_err_msg(c, ret, "walking snapshots"); - if (ret) - goto err; + prt_printf(&buf, " interior"); + darray_for_each(delete_interior, i) + prt_printf(&buf, " %u->%u", i->id, i->live_child); + + ret = commit_do(trans, NULL, NULL, 0, bch2_trans_log_msg(trans, &buf)); + printbuf_exit(&buf); + if (ret) + goto err; + } for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) { - struct bpos last_pos = POS_MIN; - snapshot_id_list equiv_seen = { 0 }; struct disk_reservation res = { 0 }; if (!btree_type_has_snapshots(btree)) @@ -1644,33 +1597,26 @@ int bch2_delete_dead_snapshots(struct bch_fs *c) btree, POS_MIN, BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, &res, NULL, BCH_TRANS_COMMIT_no_enospc, - delete_dead_snapshots_process_key(trans, &iter, k, &deleted, - &equiv_seen, &last_pos)); + delete_dead_snapshots_process_key(trans, &iter, k, + &delete_leaves, + &delete_interior)); bch2_disk_reservation_put(c, &res); - darray_exit(&equiv_seen); - bch_err_msg(c, ret, "deleting keys from dying snapshots"); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "deleting keys from dying snapshots"); if (ret) goto err; } - bch2_trans_unlock(trans); - down_write(&c->snapshot_create_lock); - - ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, ({ - u32 snapshot = k.k->p.offset; - u32 equiv = bch2_snapshot_equiv(c, snapshot); - - equiv != snapshot - ? snapshot_list_add(c, &deleted_interior, snapshot) - : 0; - })); - - bch_err_msg(c, ret, "walking snapshots"); - if (ret) - goto err_create_lock; + darray_for_each(delete_leaves, i) { + ret = commit_do(trans, NULL, NULL, 0, + bch2_snapshot_node_delete(trans, *i)); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "deleting snapshot %u", *i); + if (ret) + goto err; + } /* * Fixing children of deleted snapshots can't be done completely @@ -1680,32 +1626,24 @@ int bch2_delete_dead_snapshots(struct bch_fs *c) ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN, BTREE_ITER_intent, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, - bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior)); + bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &delete_interior)); if (ret) - goto err_create_lock; - - darray_for_each(deleted, i) { - ret = commit_do(trans, NULL, NULL, 0, - bch2_snapshot_node_delete(trans, *i)); - bch_err_msg(c, ret, "deleting snapshot %u", *i); - if (ret) - goto err_create_lock; - } + goto err; - darray_for_each(deleted_interior, i) { + darray_for_each(delete_interior, i) { ret = commit_do(trans, NULL, NULL, 0, - bch2_snapshot_node_delete(trans, *i)); - bch_err_msg(c, ret, "deleting snapshot %u", *i); + bch2_snapshot_node_delete(trans, i->id)); + if (!bch2_err_matches(ret, EROFS)) + bch_err_msg(c, ret, "deleting snapshot %u", i->id); if (ret) - goto err_create_lock; + goto err; } -err_create_lock: - up_write(&c->snapshot_create_lock); err: - darray_exit(&deleted_interior); - darray_exit(&deleted); + darray_exit(&delete_interior); + darray_exit(&delete_leaves); bch2_trans_put(trans); - bch_err_fn(c, ret); + if (!bch2_err_matches(ret, EROFS)) + bch_err_fn(c, ret); return ret; } @@ -1721,8 +1659,12 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work) void bch2_delete_dead_snapshots_async(struct bch_fs *c) { - if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) && - !queue_work(c->write_ref_wq, &c->snapshot_delete_work)) + if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots)) + return; + + 
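The snapshot deletion rework above drops the old equivalence-class pass (bch2_snapshot_set_equiv and friends) in favour of two lists built up front: delete_leaves for childless nodes with no subvolume, and an interior_delete_list pairing each redundant interior node (exactly one surviving child) with a live descendant its keys are moved to, found by the recursive live_child() walk that skips nodes already queued for deletion. A small userspace sketch of that classification, assuming a fixed in-memory tree and illustrative field names:

/* Toy snapshot-deletion classification; fields and names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define NR_NODES 8

struct toy_snapshot {
	unsigned	children[2];	/* 0 == no child */
	bool		has_subvol;
	bool		queued_delete;	/* already on a delete list */
};

static struct toy_snapshot nodes[NR_NODES];

/* find a descendant that is not itself being deleted: keys get moved there */
static unsigned toy_live_child(unsigned id)
{
	struct toy_snapshot *s = &nodes[id];

	for (unsigned i = 0; i < 2; i++)
		if (s->children[i] && !nodes[s->children[i]].queued_delete)
			return s->children[i];

	for (unsigned i = 0; i < 2; i++)
		if (s->children[i]) {
			unsigned c = toy_live_child(s->children[i]);
			if (c)
				return c;
		}
	return 0;
}

int main(void)
{
	/* root 1 with children 2 and 3: 2 is an empty leaf, 3 holds a subvolume */
	nodes[1] = (struct toy_snapshot) { .children = { 2, 3 } };
	nodes[3] = (struct toy_snapshot) { .has_subvol = true };

	/* pass 1: a leaf with no children and no subvolume is deleted outright */
	nodes[2].queued_delete = true;
	printf("node 2: delete as leaf\n");

	/* pass 2: node 1 now has a single surviving child, so it is redundant */
	unsigned live = 0;
	for (unsigned i = 0; i < 2; i++)
		live += nodes[1].children[i] &&
			!nodes[nodes[1].children[i]].queued_delete;

	if (live == 1)
		printf("node 1: interior delete, keys move to snapshot %u\n",
		       toy_live_child(1));
	return 0;
}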
BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags)); + + if (!queue_work(c->write_ref_wq, &c->snapshot_delete_work)) bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots); } @@ -1735,18 +1677,10 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, struct bkey_s_c k; int ret; - bch2_trans_iter_init(trans, &iter, id, pos, - BTREE_ITER_not_extents| - BTREE_ITER_all_snapshots); - while (1) { - k = bch2_btree_iter_prev(&iter); - ret = bkey_err(k); - if (ret) - break; - - if (!k.k) - break; - + for_each_btree_key_reverse_norestart(trans, iter, id, bpos_predecessor(pos), + BTREE_ITER_not_extents| + BTREE_ITER_all_snapshots, + k, ret) { if (!bkey_eq(pos, k.k->p)) break; @@ -1760,37 +1694,36 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, return ret; } -static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k) +static bool interior_snapshot_needs_delete(struct bkey_s_c_snapshot snap) { - struct bch_fs *c = trans->c; - struct bkey_s_c_snapshot snap; - int ret = 0; + /* If there's one child, it's redundant and keys will be moved to the child */ + return !!snap.v->children[0] + !!snap.v->children[1] == 1; +} +static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k) +{ if (k.k->type != KEY_TYPE_snapshot) return 0; - snap = bkey_s_c_to_snapshot(k); + struct bkey_s_c_snapshot snap = bkey_s_c_to_snapshot(k); if (BCH_SNAPSHOT_DELETED(snap.v) || - bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset || - (ret = bch2_snapshot_needs_delete(trans, k)) > 0) { - set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags); - return 0; - } + interior_snapshot_needs_delete(snap)) + set_bit(BCH_FS_need_delete_dead_snapshots, &trans->c->flags); - return ret; + return 0; } int bch2_snapshots_read(struct bch_fs *c) { + /* + * Initializing the is_ancestor bitmaps requires ancestors to already be + * initialized - so mark in reverse: + */ int ret = bch2_trans_run(c, - for_each_btree_key(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, + for_each_btree_key_reverse(trans, iter, BTREE_ID_snapshots, + POS_MAX, 0, k, __bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?: - bch2_snapshot_set_equiv(trans, k) ?: - bch2_check_snapshot_needs_deletion(trans, k)) ?: - for_each_btree_key(trans, iter, BTREE_ID_snapshots, - POS_MIN, 0, k, - (set_is_ancestor_bitmap(c, k.k->p.offset), 0))); + bch2_check_snapshot_needs_deletion(trans, k))); bch_err_fn(c, ret); /* diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h index 29c94716293e..00373cf32e7b 100644 --- a/fs/bcachefs/snapshot.h +++ b/fs/bcachefs/snapshot.h @@ -2,11 +2,9 @@ #ifndef _BCACHEFS_SNAPSHOT_H #define _BCACHEFS_SNAPSHOT_H -enum bch_validate_flags; - void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_snapshot_tree_validate(struct bch_fs *, struct bkey_s_c, - enum bch_validate_flags); + struct bkey_validate_context); #define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \ .key_validate = bch2_snapshot_tree_validate, \ @@ -19,7 +17,8 @@ struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *); int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *); void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); -int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); int bch2_mark_snapshot(struct btree_trans *, 
enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); @@ -120,19 +119,19 @@ static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id) return id; } -static inline u32 __bch2_snapshot_equiv(struct bch_fs *c, u32 id) +static inline bool __bch2_snapshot_exists(struct bch_fs *c, u32 id) { const struct snapshot_t *s = snapshot_t(c, id); - return s ? s->equiv : 0; + return s ? s->live : 0; } -static inline u32 bch2_snapshot_equiv(struct bch_fs *c, u32 id) +static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id) { rcu_read_lock(); - id = __bch2_snapshot_equiv(c, id); + bool ret = __bch2_snapshot_exists(c, id); rcu_read_unlock(); - return id; + return ret; } static inline int bch2_snapshot_is_internal_node(struct bch_fs *c, u32 id) diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c new file mode 100644 index 000000000000..d78451c2a0c6 --- /dev/null +++ b/fs/bcachefs/str_hash.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "bcachefs.h" +#include "btree_cache.h" +#include "btree_update.h" +#include "dirent.h" +#include "fsck.h" +#include "str_hash.h" +#include "subvolume.h" + +static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dirent d) +{ + if (d.v->d_type == DT_SUBVOL) { + struct bch_subvolume subvol; + int ret = bch2_subvolume_get(trans, le32_to_cpu(d.v->d_child_subvol), + false, &subvol); + if (ret && !bch2_err_matches(ret, ENOENT)) + return ret; + return !ret; + } else { + struct btree_iter iter; + struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, + SPOS(0, le64_to_cpu(d.v->d_inum), d.k->p.snapshot), 0); + int ret = bkey_err(k); + if (ret) + return ret; + + ret = bkey_is_inode(k.k); + bch2_trans_iter_exit(trans, &iter); + return ret; + } +} + +static noinline int fsck_rename_dirent(struct btree_trans *trans, + struct snapshots_seen *s, + const struct bch_hash_desc desc, + struct bch_hash_info *hash_info, + struct bkey_s_c_dirent old) +{ + struct qstr old_name = bch2_dirent_get_name(old); + struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, bkey_bytes(old.k) + 32); + int ret = PTR_ERR_OR_ZERO(new); + if (ret) + return ret; + + bkey_dirent_init(&new->k_i); + dirent_copy_target(new, old); + new->k.p = old.k->p; + + for (unsigned i = 0; i < 1000; i++) { + unsigned len = sprintf(new->v.d_name, "%.*s.fsck_renamed-%u", + old_name.len, old_name.name, i); + unsigned u64s = BKEY_U64s + dirent_val_u64s(len); + + if (u64s > U8_MAX) + return -EINVAL; + + new->k.u64s = u64s; + + ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info, + (subvol_inum) { 0, old.k->p.inode }, + old.k->p.snapshot, &new->k_i, + BTREE_UPDATE_internal_snapshot_node); + if (!bch2_err_matches(ret, EEXIST)) + break; + } + + if (ret) + return ret; + + return bch2_fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i); +} + +static noinline int hash_pick_winner(struct btree_trans *trans, + const struct bch_hash_desc desc, + struct bch_hash_info *hash_info, + struct bkey_s_c k1, + struct bkey_s_c k2) +{ + if (bkey_val_bytes(k1.k) == bkey_val_bytes(k2.k) && + !memcmp(k1.v, k2.v, bkey_val_bytes(k1.k))) + return 0; + + switch (desc.btree_id) { + case BTREE_ID_dirents: { + int ret = bch2_dirent_has_target(trans, bkey_s_c_to_dirent(k1)); + if (ret < 0) + return ret; + if (!ret) + return 0; + + ret = bch2_dirent_has_target(trans, bkey_s_c_to_dirent(k2)); + if (ret < 0) + return ret; + if (!ret) + return 1; + return 2; + } + default: + return 0; + } +} + +static int 
repair_inode_hash_info(struct btree_trans *trans, + struct bch_inode_unpacked *snapshot_root) +{ + struct btree_iter iter; + struct bkey_s_c k; + int ret = 0; + + for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, + SPOS(0, snapshot_root->bi_inum, snapshot_root->bi_snapshot - 1), + BTREE_ITER_all_snapshots, k, ret) { + if (k.k->p.offset != snapshot_root->bi_inum) + break; + if (!bkey_is_inode(k.k)) + continue; + + struct bch_inode_unpacked inode; + ret = bch2_inode_unpack(k, &inode); + if (ret) + break; + + if (fsck_err_on(inode.bi_hash_seed != snapshot_root->bi_hash_seed || + INODE_STR_HASH(&inode) != INODE_STR_HASH(snapshot_root), + trans, inode_snapshot_mismatch, + "inode hash info in different snapshots don't match")) { + inode.bi_hash_seed = snapshot_root->bi_hash_seed; + SET_INODE_STR_HASH(&inode, INODE_STR_HASH(snapshot_root)); + ret = __bch2_fsck_write_inode(trans, &inode) ?: + bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; + break; + } + } +fsck_err: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +/* + * All versions of the same inode in different snapshots must have the same hash + * seed/type: verify that the hash info we're using matches the root + */ +static noinline int check_inode_hash_info_matches_root(struct btree_trans *trans, u64 inum, + struct bch_hash_info *hash_info) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter; + struct bkey_s_c k; + int ret = 0; + + for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, U32_MAX), + BTREE_ITER_all_snapshots, k, ret) { + if (k.k->p.offset != inum) + break; + if (bkey_is_inode(k.k)) + goto found; + } + bch_err(c, "%s(): inum %llu not found", __func__, inum); + ret = -BCH_ERR_fsck_repair_unimplemented; + goto err; +found:; + struct bch_inode_unpacked inode; + ret = bch2_inode_unpack(k, &inode); + if (ret) + goto err; + + struct bch_hash_info hash2 = bch2_hash_info_init(c, &inode); + if (hash_info->type != hash2.type || + memcmp(&hash_info->siphash_key, &hash2.siphash_key, sizeof(hash2.siphash_key))) { + ret = repair_inode_hash_info(trans, &inode); + if (!ret) { + bch_err(c, "inode hash info mismatch with root, but mismatch not found\n" + "%u %llx %llx\n" + "%u %llx %llx", + hash_info->type, + hash_info->siphash_key.k0, + hash_info->siphash_key.k1, + hash2.type, + hash2.siphash_key.k0, + hash2.siphash_key.k1); + ret = -BCH_ERR_fsck_repair_unimplemented; + } + } +err: + bch2_trans_iter_exit(trans, &iter); + return ret; +} + +int __bch2_str_hash_check_key(struct btree_trans *trans, + struct snapshots_seen *s, + const struct bch_hash_desc *desc, + struct bch_hash_info *hash_info, + struct btree_iter *k_iter, struct bkey_s_c hash_k) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter = { NULL }; + struct printbuf buf = PRINTBUF; + struct bkey_s_c k; + int ret = 0; + + u64 hash = desc->hash_bkey(hash_info, hash_k); + if (hash_k.k->p.offset < hash) + goto bad_hash; + + for_each_btree_key_norestart(trans, iter, desc->btree_id, + SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot), + BTREE_ITER_slots, k, ret) { + if (bkey_eq(k.k->p, hash_k.k->p)) + break; + + if (k.k->type == desc->key_type && + !desc->cmp_bkey(k, hash_k)) + goto duplicate_entries; + + if (bkey_deleted(k.k)) { + bch2_trans_iter_exit(trans, &iter); + goto bad_hash; + } + } +out: + bch2_trans_iter_exit(trans, &iter); + printbuf_exit(&buf); + return ret; +bad_hash: + /* + * Before doing any repair, check hash_info itself: + */ + ret = 
check_inode_hash_info_matches_root(trans, hash_k.k->p.inode, hash_info); + if (ret) + goto out; + + if (fsck_err(trans, hash_table_key_wrong_offset, + "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n %s", + bch2_btree_id_str(desc->btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash, + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) { + struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, hash_k); + if (IS_ERR(new)) + return PTR_ERR(new); + + k = bch2_hash_set_or_get_in_snapshot(trans, &iter, *desc, hash_info, + (subvol_inum) { 0, hash_k.k->p.inode }, + hash_k.k->p.snapshot, new, + STR_HASH_must_create| + BTREE_ITER_with_updates| + BTREE_UPDATE_internal_snapshot_node); + ret = bkey_err(k); + if (ret) + goto out; + if (k.k) + goto duplicate_entries; + + ret = bch2_hash_delete_at(trans, *desc, hash_info, k_iter, + BTREE_UPDATE_internal_snapshot_node) ?: + bch2_fsck_update_backpointers(trans, s, *desc, hash_info, new) ?: + bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?: + -BCH_ERR_transaction_restart_nested; + goto out; + } +fsck_err: + goto out; +duplicate_entries: + ret = hash_pick_winner(trans, *desc, hash_info, hash_k, k); + if (ret < 0) + goto out; + + if (!fsck_err(trans, hash_table_key_duplicate, + "duplicate hash table keys%s:\n%s", + ret != 2 ? "" : ", both point to valid inodes", + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, hash_k), + prt_newline(&buf), + bch2_bkey_val_to_text(&buf, c, k), + buf.buf))) + goto out; + + switch (ret) { + case 0: + ret = bch2_hash_delete_at(trans, *desc, hash_info, k_iter, 0); + break; + case 1: + ret = bch2_hash_delete_at(trans, *desc, hash_info, &iter, 0); + break; + case 2: + ret = fsck_rename_dirent(trans, s, *desc, hash_info, bkey_s_c_to_dirent(hash_k)) ?: + bch2_hash_delete_at(trans, *desc, hash_info, k_iter, 0); + goto out; + } + + ret = bch2_trans_commit(trans, NULL, NULL, 0) ?: + -BCH_ERR_transaction_restart_nested; + goto out; +} diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h index ec2b1feea520..55a4ac7bf220 100644 --- a/fs/bcachefs/str_hash.h +++ b/fs/bcachefs/str_hash.h @@ -160,7 +160,7 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans, struct bkey_s_c k; int ret; - for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id, + for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|flags, k, ret) { @@ -210,7 +210,7 @@ bch2_hash_hole(struct btree_trans *trans, if (ret) return ret; - for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id, + for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(inum.inum, desc.hash_key(info, key), snapshot), POS(inum.inum, U64_MAX), BTREE_ITER_slots|BTREE_ITER_intent, k, ret) @@ -265,7 +265,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans, bool found = false; int ret; - for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id, + for_each_btree_key_max_norestart(trans, *iter, desc.btree_id, SPOS(insert->k.p.inode, desc.hash_bkey(info, bkey_i_to_s_c(insert)), snapshot), @@ -393,4 +393,26 @@ int bch2_hash_delete(struct btree_trans *trans, return ret; } +struct snapshots_seen; +int __bch2_str_hash_check_key(struct btree_trans *, + struct snapshots_seen *, + const struct bch_hash_desc *, + struct bch_hash_info *, + struct btree_iter *, struct bkey_s_c); + +static inline int bch2_str_hash_check_key(struct btree_trans *trans, + struct 
snapshots_seen *s, + const struct bch_hash_desc *desc, + struct bch_hash_info *hash_info, + struct btree_iter *k_iter, struct bkey_s_c hash_k) +{ + if (hash_k.k->type != desc->key_type) + return 0; + + if (likely(desc->hash_bkey(hash_info, hash_k) == hash_k.k->p.offset)) + return 0; + + return __bch2_str_hash_check_key(trans, s, desc, hash_info, k_iter, hash_k); +} + #endif /* _BCACHEFS_STR_HASH_H */ diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c index 80e5efaff524..b7b96283c316 100644 --- a/fs/bcachefs/subvolume.c +++ b/fs/bcachefs/subvolume.c @@ -207,7 +207,7 @@ int bch2_check_subvol_children(struct bch_fs *c) /* Subvolumes: */ int bch2_subvolume_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_subvolume subvol = bkey_s_c_to_subvolume(k); int ret = 0; @@ -286,11 +286,11 @@ int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol) static __always_inline int bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol, bool inconsistent_if_not_found, - int iter_flags, struct bch_subvolume *s) { int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_subvolumes, POS(0, subvol), - iter_flags, subvolume, s); + BTREE_ITER_cached| + BTREE_ITER_with_updates, subvolume, s); bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT) && inconsistent_if_not_found, trans->c, "missing subvolume %u", subvol); @@ -299,16 +299,15 @@ bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol, int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol, bool inconsistent_if_not_found, - int iter_flags, struct bch_subvolume *s) { - return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s); + return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, s); } int bch2_subvol_is_ro_trans(struct btree_trans *trans, u32 subvol) { struct bch_subvolume s; - int ret = bch2_subvolume_get_inlined(trans, subvol, true, 0, &s); + int ret = bch2_subvolume_get_inlined(trans, subvol, true, &s); if (ret) return ret; @@ -328,7 +327,7 @@ int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot, struct bch_snapshot snap; return bch2_snapshot_lookup(trans, snapshot, &snap) ?: - bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, 0, subvol); + bch2_subvolume_get(trans, le32_to_cpu(snap.subvol), true, subvol); } int __bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid, @@ -396,8 +395,7 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d struct bch_subvolume s; return lockrestart_do(trans, - bch2_subvolume_get(trans, subvolid_to_delete, true, - BTREE_ITER_cached, &s)) ?: + bch2_subvolume_get(trans, subvolid_to_delete, true, &s)) ?: for_each_btree_key_commit(trans, iter, BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, @@ -411,26 +409,61 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d */ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid) { - struct btree_iter iter; - struct bkey_s_c_subvolume subvol; - u32 snapid; - int ret = 0; + struct btree_iter subvol_iter = {}, snapshot_iter = {}, snapshot_tree_iter = {}; - subvol = bch2_bkey_get_iter_typed(trans, &iter, + struct bkey_s_c_subvolume subvol = + bch2_bkey_get_iter_typed(trans, &subvol_iter, BTREE_ID_subvolumes, POS(0, subvolid), BTREE_ITER_cached|BTREE_ITER_intent, subvolume); - ret = bkey_err(subvol); + int ret = bkey_err(subvol); 
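The new str_hash.c / str_hash.h code above centralizes the fsck check for hashed btrees (dirents, xattrs): a key must sit at the btree offset its name hashes to, or be reachable from that hashed slot by walking forward over occupied slots, and two reachable keys that hash and compare equal are duplicates to be resolved by hash_pick_winner(). A toy linear-probing model of that invariant check, using the name length as a stand-in hash so every outcome can be verified by hand:

/* Toy hash-placement check; the hash and table are illustrative only. */
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 12

static const char *table[TABLE_SIZE];

/* stand-in hash: just the name length, to keep the example checkable by eye */
static unsigned toy_hash(const char *name)
{
	return strlen(name) % 8;
}

/* a correct writer stores at the hashed slot, probing forward on collision */
static void toy_insert(const char *name)
{
	unsigned i = toy_hash(name);

	while (table[i])
		i++;
	table[i] = name;
}

/* mirrors the shape of the check in __bch2_str_hash_check_key() */
static const char *toy_check_slot(unsigned i)
{
	unsigned h = toy_hash(table[i]);

	if (i < h)
		return "bad hash: key sits before its hashed slot";

	for (unsigned j = h; j != i; j++) {
		if (!table[j])
			return "bad hash: probe chain broken";
		if (!strcmp(table[j], table[i]))
			return "duplicate hash table key";
	}
	return "ok";
}

int main(void)
{
	toy_insert("passwd");	/* lands at its hashed slot (6) */
	toy_insert("shadow");	/* same hash, probes forward to slot 7 */
	table[2] = "passwd";	/* simulate a key written before its hashed slot */
	table[8] = "shadow";	/* simulate a stale duplicate */
	table[9] = "group";	/* hashed slot 5 is empty: unreachable by probing */

	for (unsigned i = 0; i < TABLE_SIZE; i++)
		if (table[i])
			printf("slot %2u %-8s %s\n", i, table[i], toy_check_slot(i));
	return 0;
}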
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, "missing subvolume %u", subvolid); if (ret) - return ret; + goto err; - snapid = le32_to_cpu(subvol.v->snapshot); + u32 snapid = le32_to_cpu(subvol.v->snapshot); + + struct bkey_s_c_snapshot snapshot = + bch2_bkey_get_iter_typed(trans, &snapshot_iter, + BTREE_ID_snapshots, POS(0, snapid), + 0, snapshot); + ret = bkey_err(snapshot); + bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, + "missing snapshot %u", snapid); + if (ret) + goto err; - ret = bch2_btree_delete_at(trans, &iter, 0) ?: + u32 treeid = le32_to_cpu(snapshot.v->tree); + + struct bkey_s_c_snapshot_tree snapshot_tree = + bch2_bkey_get_iter_typed(trans, &snapshot_tree_iter, + BTREE_ID_snapshot_trees, POS(0, treeid), + 0, snapshot_tree); + ret = bkey_err(snapshot_tree); + bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c, + "missing snapshot tree %u", treeid); + if (ret) + goto err; + + if (le32_to_cpu(snapshot_tree.v->master_subvol) == subvolid) { + struct bkey_i_snapshot_tree *snapshot_tree_mut = + bch2_bkey_make_mut_typed(trans, &snapshot_tree_iter, + &snapshot_tree.s_c, + 0, snapshot_tree); + ret = PTR_ERR_OR_ZERO(snapshot_tree_mut); + if (ret) + goto err; + + snapshot_tree_mut->v.master_subvol = 0; + } + + ret = bch2_btree_delete_at(trans, &subvol_iter, 0) ?: bch2_snapshot_node_set_deleted(trans, snapid); - bch2_trans_iter_exit(trans, &iter); +err: + bch2_trans_iter_exit(trans, &snapshot_tree_iter); + bch2_trans_iter_exit(trans, &snapshot_iter); + bch2_trans_iter_exit(trans, &subvol_iter); return ret; } @@ -675,7 +708,7 @@ err: /* set bi_subvol on root inode */ int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) { - int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw, + int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc, __bch2_fs_upgrade_for_subvolumes(trans)); bch_err_fn(c, ret); return ret; diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h index f897d106e142..910f6196700e 100644 --- a/fs/bcachefs/subvolume.h +++ b/fs/bcachefs/subvolume.h @@ -5,12 +5,11 @@ #include "darray.h" #include "subvolume_types.h" -enum bch_validate_flags; - int bch2_check_subvols(struct bch_fs *); int bch2_check_subvol_children(struct bch_fs *); -int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, @@ -25,7 +24,7 @@ int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned, int bch2_subvol_has_children(struct btree_trans *, u32); int bch2_subvolume_get(struct btree_trans *, unsigned, - bool, int, struct bch_subvolume *); + bool, struct bch_subvolume *); int __bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *, bool); int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *); @@ -34,7 +33,7 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32); int bch2_subvol_is_ro(struct bch_fs *, u32); static inline struct bkey_s_c -bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos end, +bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end, u32 subvolid, unsigned flags) { u32 snapshot; @@ -43,10 +42,10 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos return 
bkey_s_c_err(ret); bch2_btree_iter_set_snapshot(iter, snapshot); - return bch2_btree_iter_peek_upto_type(iter, end, flags); + return bch2_btree_iter_peek_max_type(iter, end, flags); } -#define for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \ +#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \ _end, _subvolid, _flags, _k, _do) \ ({ \ struct bkey_s_c _k; \ @@ -54,7 +53,7 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos \ do { \ _ret3 = lockrestart_do(_trans, ({ \ - (_k) = bch2_btree_iter_peek_in_subvolume_upto_type(&(_iter), \ + (_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter), \ _end, _subvolid, (_flags)); \ if (!(_k).k) \ break; \ @@ -67,14 +66,14 @@ bch2_btree_iter_peek_in_subvolume_upto_type(struct btree_iter *iter, struct bpos _ret3; \ }) -#define for_each_btree_key_in_subvolume_upto(_trans, _iter, _btree_id, \ +#define for_each_btree_key_in_subvolume_max(_trans, _iter, _btree_id, \ _start, _end, _subvolid, _flags, _k, _do) \ ({ \ struct btree_iter _iter; \ bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \ (_start), (_flags)); \ \ - for_each_btree_key_in_subvolume_upto_continue(_trans, _iter, \ + for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \ _end, _subvolid, _flags, _k, _do); \ }) diff --git a/fs/bcachefs/subvolume_types.h b/fs/bcachefs/subvolume_types.h index f2ec4277c2a5..1549d6daf7af 100644 --- a/fs/bcachefs/subvolume_types.h +++ b/fs/bcachefs/subvolume_types.h @@ -9,13 +9,13 @@ typedef DARRAY(u32) snapshot_id_list; #define IS_ANCESTOR_BITMAP 128 struct snapshot_t { + bool live; u32 parent; u32 skip[3]; u32 depth; u32 children[2]; u32 subvol; /* Nonzero only if a subvolume points to this node: */ u32 tree; - u32 equiv; unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)]; }; diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c index 7c71594f6a8b..8037ccbacf6a 100644 --- a/fs/bcachefs/super-io.c +++ b/fs/bcachefs/super-io.c @@ -23,6 +23,7 @@ #include <linux/backing-dev.h> #include <linux/sort.h> +#include <linux/string_choices.h> static const struct blk_holder_ops bch2_sb_handle_bdev_ops = { }; @@ -41,7 +42,7 @@ static const struct bch2_metadata_version bch2_metadata_versions[] = { #undef x }; -void bch2_version_to_text(struct printbuf *out, unsigned v) +void bch2_version_to_text(struct printbuf *out, enum bcachefs_metadata_version v) { const char *str = "(unknown version)"; @@ -54,7 +55,7 @@ void bch2_version_to_text(struct printbuf *out, unsigned v) prt_printf(out, "%u.%u: %s", BCH_VERSION_MAJOR(v), BCH_VERSION_MINOR(v), str); } -unsigned bch2_latest_compatible_version(unsigned v) +enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_metadata_version v) { if (!BCH_VERSION_MAJOR(v)) return v; @@ -68,6 +69,16 @@ unsigned bch2_latest_compatible_version(unsigned v) return v; } +void bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version version) +{ + mutex_lock(&c->sb_lock); + SET_BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb, + max(BCH_SB_VERSION_INCOMPAT(c->disk_sb.sb), version)); + c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_FEATURE_incompat_version_field); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); +} + const char * const bch2_sb_fields[] = { #define x(name, nr) #name, BCH_SB_FIELDS() @@ -368,6 +379,12 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, return -BCH_ERR_invalid_sb_features; } + if (BCH_VERSION_MAJOR(le16_to_cpu(sb->version)) > BCH_VERSION_MAJOR(bcachefs_metadata_version_current) || + 
BCH_SB_VERSION_INCOMPAT(sb) > bcachefs_metadata_version_current) { + prt_printf(out, "Filesystem has incompatible version"); + return -BCH_ERR_invalid_sb_features; + } + block_size = le16_to_cpu(sb->block_size); if (block_size > PAGE_SECTORS) { @@ -406,6 +423,21 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, return -BCH_ERR_invalid_sb_time_precision; } + /* old versions didn't know to downgrade this field */ + if (BCH_SB_VERSION_INCOMPAT_ALLOWED(sb) > le16_to_cpu(sb->version)) + SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(sb, le16_to_cpu(sb->version)); + + if (BCH_SB_VERSION_INCOMPAT(sb) > BCH_SB_VERSION_INCOMPAT_ALLOWED(sb)) { + prt_printf(out, "Invalid version_incompat "); + bch2_version_to_text(out, BCH_SB_VERSION_INCOMPAT(sb)); + prt_str(out, " > incompat_allowed "); + bch2_version_to_text(out, BCH_SB_VERSION_INCOMPAT_ALLOWED(sb)); + if (flags & BCH_VALIDATE_write) + return -BCH_ERR_invalid_sb_version; + else + SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(sb, BCH_SB_VERSION_INCOMPAT(sb)); + } + if (!flags) { /* * Been seeing a bug where these are getting inexplicably @@ -428,6 +460,11 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, SET_BCH_SB_PROMOTE_WHOLE_EXTENTS(sb, true); } +#ifdef __KERNEL__ + if (!BCH_SB_SHARD_INUMS_NBITS(sb)) + SET_BCH_SB_SHARD_INUMS_NBITS(sb, ilog2(roundup_pow_of_two(num_online_cpus()))); +#endif + for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) { const struct bch_option *opt = bch2_opt_table + opt_id; @@ -519,6 +556,9 @@ static void bch2_sb_update(struct bch_fs *c) c->sb.uuid = src->uuid; c->sb.user_uuid = src->user_uuid; c->sb.version = le16_to_cpu(src->version); + c->sb.version_incompat = BCH_SB_VERSION_INCOMPAT(src); + c->sb.version_incompat_allowed + = BCH_SB_VERSION_INCOMPAT_ALLOWED(src); c->sb.version_min = le16_to_cpu(src->version_min); c->sb.version_upgrade_complete = BCH_SB_VERSION_UPGRADE_COMPLETE(src); c->sb.nr_devices = src->nr_devices; @@ -676,7 +716,8 @@ reread: } enum bch_csum_type csum_type = BCH_SB_CSUM_TYPE(sb->sb); - if (csum_type >= BCH_CSUM_NR) { + if (csum_type >= BCH_CSUM_NR || + bch2_csum_type_is_encryption(csum_type)) { prt_printf(err, "unknown checksum type %llu", BCH_SB_CSUM_TYPE(sb->sb)); return -BCH_ERR_invalid_sb_csum_type; } @@ -878,7 +919,7 @@ static void write_super_endio(struct bio *bio) ? BCH_MEMBER_ERROR_write : BCH_MEMBER_ERROR_read, "superblock %s error: %s", - bio_data_dir(bio) ? "write" : "read", + str_write_read(bio_data_dir(bio)), bch2_blk_status_to_str(bio->bi_status))) ca->sb_write_error = 1; @@ -891,14 +932,15 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca) struct bch_sb *sb = ca->disk_sb.sb; struct bio *bio = ca->disk_sb.bio; + memset(ca->sb_read_scratch, 0, BCH_SB_READ_SCRATCH_BUF_SIZE); + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META); bio->bi_iter.bi_sector = le64_to_cpu(sb->layout.sb_offset[0]); bio->bi_end_io = write_super_endio; bio->bi_private = ca; - bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE); + bch2_bio_map(bio, ca->sb_read_scratch, BCH_SB_READ_SCRATCH_BUF_SIZE); - this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], - bio_sectors(bio)); + this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio)); percpu_ref_get(&ca->io_ref); closure_bio_submit(bio, &c->sb_write); @@ -1042,9 +1084,16 @@ int bch2_write_super(struct bch_fs *c) ": Superblock write was silently dropped! 
(seq %llu expected %llu)", le64_to_cpu(ca->sb_read_scratch->seq), ca->disk_sb.seq); - bch2_fs_fatal_error(c, "%s", buf.buf); + + if (c->opts.errors != BCH_ON_ERROR_continue && + c->opts.errors != BCH_ON_ERROR_fix_safe) { + ret = -BCH_ERR_erofs_sb_err; + bch2_fs_fatal_error(c, "%s", buf.buf); + } else { + bch_err(c, "%s", buf.buf); + } + printbuf_exit(&buf); - ret = -BCH_ERR_erofs_sb_err; } if (le64_to_cpu(ca->sb_read_scratch->seq) > ca->disk_sb.seq) { @@ -1149,6 +1198,8 @@ bool bch2_check_version_downgrade(struct bch_fs *c) */ if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current); + if (BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb) > bcachefs_metadata_version_current) + SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb, bcachefs_metadata_version_current); if (c->sb.version > bcachefs_metadata_version_current) c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current); if (c->sb.version_min > bcachefs_metadata_version_current) @@ -1157,7 +1208,7 @@ bool bch2_check_version_downgrade(struct bch_fs *c) return ret; } -void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version) +void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version, bool incompat) { lockdep_assert_held(&c->sb_lock); @@ -1167,6 +1218,10 @@ void bch2_sb_upgrade(struct bch_fs *c, unsigned new_version) c->disk_sb.sb->version = cpu_to_le16(new_version); c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL); + + if (incompat) + SET_BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb, + max(BCH_SB_VERSION_INCOMPAT_ALLOWED(c->disk_sb.sb), new_version)); } static int bch2_sb_ext_validate(struct bch_sb *sb, struct bch_sb_field *f, @@ -1331,6 +1386,14 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb, bch2_version_to_text(out, le16_to_cpu(sb->version)); prt_newline(out); + prt_printf(out, "Incompatible features allowed:\t"); + bch2_version_to_text(out, BCH_SB_VERSION_INCOMPAT_ALLOWED(sb)); + prt_newline(out); + + prt_printf(out, "Incompatible features in use:\t"); + bch2_version_to_text(out, BCH_SB_VERSION_INCOMPAT(sb)); + prt_newline(out); + prt_printf(out, "Version upgrade complete:\t"); bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb)); prt_newline(out); diff --git a/fs/bcachefs/super-io.h b/fs/bcachefs/super-io.h index fadd364e2802..f1ab4f943720 100644 --- a/fs/bcachefs/super-io.h +++ b/fs/bcachefs/super-io.h @@ -10,14 +10,29 @@ #include <asm/byteorder.h> +#define BCH_SB_READ_SCRATCH_BUF_SIZE 4096 + static inline bool bch2_version_compatible(u16 version) { return BCH_VERSION_MAJOR(version) <= BCH_VERSION_MAJOR(bcachefs_metadata_version_current) && version >= bcachefs_metadata_version_min; } -void bch2_version_to_text(struct printbuf *, unsigned); -unsigned bch2_latest_compatible_version(unsigned); +void bch2_version_to_text(struct printbuf *, enum bcachefs_metadata_version); +enum bcachefs_metadata_version bch2_latest_compatible_version(enum bcachefs_metadata_version); + +void bch2_set_version_incompat(struct bch_fs *, enum bcachefs_metadata_version); + +static inline bool bch2_request_incompat_feature(struct bch_fs *c, + enum bcachefs_metadata_version version) +{ + if (unlikely(version > c->sb.version_incompat)) { + if (version > c->sb.version_incompat_allowed) + return false; + bch2_set_version_incompat(c, version); + } + return true; +} static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f) { @@ -92,7 +107,7 @@ static inline void 
bch2_check_set_feature(struct bch_fs *c, unsigned feat) } bool bch2_check_version_downgrade(struct bch_fs *); -void bch2_sb_upgrade(struct bch_fs *, unsigned); +void bch2_sb_upgrade(struct bch_fs *, unsigned, bool); void __bch2_sb_field_to_text(struct printbuf *, struct bch_sb *, struct bch_sb_field *); diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c index a6ed9a0bf1c7..6d97d412fed9 100644 --- a/fs/bcachefs/super.c +++ b/fs/bcachefs/super.c @@ -290,7 +290,7 @@ static void __bch2_fs_read_only(struct bch_fs *c) bch2_fs_journal_stop(&c->journal); - bch_info(c, "%sshutdown complete, journal seq %llu", + bch_info(c, "%sclean shutdown complete, journal seq %llu", test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un", c->journal.seq_ondisk); @@ -411,6 +411,17 @@ bool bch2_fs_emergency_read_only(struct bch_fs *c) return ret; } +bool bch2_fs_emergency_read_only_locked(struct bch_fs *c) +{ + bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags); + + bch2_journal_halt_locked(&c->journal); + bch2_fs_read_only_async(c); + + wake_up(&bch2_read_only_wait); + return ret; +} + static int bch2_fs_read_write_late(struct bch_fs *c) { int ret; @@ -441,6 +452,8 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early) { int ret; + BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags)); + if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) { bch_err(c, "cannot go rw, unfixed btree errors"); return -BCH_ERR_erofs_unfixed_errors; @@ -561,6 +574,7 @@ static void __bch2_fs_free(struct bch_fs *c) bch2_io_clock_exit(&c->io_clock[WRITE]); bch2_io_clock_exit(&c->io_clock[READ]); bch2_fs_compress_exit(c); + bch2_fs_btree_gc_exit(c); bch2_journal_keys_put_initial(c); bch2_find_btree_nodes_exit(&c->found_btree_nodes); BUG_ON(atomic_read(&c->journal_keys.ref)); @@ -584,7 +598,6 @@ static void __bch2_fs_free(struct bch_fs *c) #endif kfree(rcu_dereference_protected(c->disk_groups, 1)); kfree(c->journal_seq_blacklist_table); - kfree(c->unused_inode_hints); if (c->write_ref_wq) destroy_workqueue(c->write_ref_wq); @@ -766,21 +779,17 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) refcount_set(&c->ro_ref, 1); init_waitqueue_head(&c->ro_ref_wait); + spin_lock_init(&c->recovery_pass_lock); sema_init(&c->online_fsck_mutex, 1); - init_rwsem(&c->gc_lock); - mutex_init(&c->gc_gens_lock); - atomic_set(&c->journal_keys.ref, 1); - c->journal_keys.initial_ref_held = true; - for (i = 0; i < BCH_TIME_STAT_NR; i++) bch2_time_stats_init(&c->times[i]); - bch2_fs_gc_init(c); bch2_fs_copygc_init(c); bch2_fs_btree_key_cache_init_early(&c->btree_key_cache); bch2_fs_btree_iter_init_early(c); bch2_fs_btree_interior_update_init_early(c); + bch2_fs_journal_keys_init(c); bch2_fs_allocator_background_init(c); bch2_fs_allocator_foreground_init(c); bch2_fs_rebalance_init(c); @@ -809,9 +818,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) INIT_LIST_HEAD(&c->vfs_inodes_list); mutex_init(&c->vfs_inodes_lock); - c->copy_gc_enabled = 1; - c->rebalance.enabled = 1; - c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write]; c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write]; c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq]; @@ -873,8 +879,6 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) (btree_blocks(c) + 1) * 2 * sizeof(struct sort_iter_set); - c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus())); - if (!(c->btree_update_wq = alloc_workqueue("bcachefs", 
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) || !(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io", @@ -901,9 +905,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) !(c->online_reserved = alloc_percpu(u64)) || mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1, c->opts.btree_node_size) || - mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) || - !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits, - sizeof(u64), GFP_KERNEL))) { + mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048)) { ret = -BCH_ERR_ENOMEM_fs_other_alloc; goto err; } @@ -917,6 +919,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts) bch2_fs_btree_cache_init(c) ?: bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?: bch2_fs_btree_interior_update_init(c) ?: + bch2_fs_btree_gc_init(c) ?: bch2_fs_buckets_waiting_for_journal_init(c) ?: bch2_fs_btree_write_buffer_init(c) ?: bch2_fs_subvolumes_init(c) ?: @@ -1033,9 +1036,12 @@ int bch2_fs_start(struct bch_fs *c) bch2_dev_allocator_add(c, ca); bch2_recalc_capacity(c); + c->recovery_task = current; ret = BCH_SB_INITIALIZED(c->disk_sb.sb) ? bch2_fs_recovery(c) : bch2_fs_initialize(c); + c->recovery_task = NULL; + if (ret) goto err; @@ -1120,12 +1126,12 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs, prt_bdevname(&buf, fs->bdev); prt_char(&buf, ' '); - bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time));; + bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time)); prt_newline(&buf); prt_bdevname(&buf, sb->bdev); prt_char(&buf, ' '); - bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time));; + bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time)); prt_newline(&buf); if (!opts->no_splitbrain_check) @@ -1198,7 +1204,7 @@ static void bch2_dev_free(struct bch_dev *ca) free_percpu(ca->io_done); bch2_dev_buckets_free(ca); - free_page((unsigned long) ca->sb_read_scratch); + kfree(ca->sb_read_scratch); bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]); bch2_time_stats_quantiles_exit(&ca->io_latency[READ]); @@ -1309,8 +1315,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, init_completion(&ca->ref_completion); init_completion(&ca->io_ref_completion); - init_rwsem(&ca->bucket_lock); - INIT_WORK(&ca->io_error_work, bch2_io_error_work); bch2_time_stats_quantiles_init(&ca->io_latency[READ]); @@ -1337,7 +1341,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c, if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete, PERCPU_REF_INIT_DEAD, GFP_KERNEL) || - !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) || + !(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) || bch2_dev_buckets_alloc(c, ca) || !(ca->io_done = alloc_percpu(*ca->io_done))) goto err; @@ -1366,7 +1370,6 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) { struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx); struct bch_dev *ca = NULL; - int ret = 0; if (bch2_fs_init_fault("dev_alloc")) goto err; @@ -1378,10 +1381,8 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx) ca->fs = c; bch2_dev_attach(c, ca, dev_idx); - return ret; + return 0; err: - if (ca) - bch2_dev_free(ca); return -BCH_ERR_ENOMEM_dev_alloc; } @@ -1751,11 +1752,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path) if (ret) goto err; - ret = bch2_dev_journal_alloc(ca, true); - bch_err_msg(c, ret, "allocating journal"); - if (ret) - goto err; - down_write(&c->state_lock); mutex_lock(&c->sb_lock); @@ -1806,11 +1802,14 @@ int bch2_dev_add(struct 
bch_fs *c, const char *path) if (ret) goto err_late; - ca->new_fs_bucket_idx = 0; - if (ca->mi.state == BCH_MEMBER_STATE_rw) __bch2_dev_read_write(c, ca); + ret = bch2_dev_journal_alloc(ca, false); + bch_err_msg(c, ret, "allocating journal"); + if (ret) + goto err_late; + up_write(&c->state_lock); return 0; diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h index dada09331d2e..04f8287eff5c 100644 --- a/fs/bcachefs/super.h +++ b/fs/bcachefs/super.h @@ -29,21 +29,12 @@ int bch2_dev_resize(struct bch_fs *, struct bch_dev *, u64); struct bch_dev *bch2_dev_lookup(struct bch_fs *, const char *); bool bch2_fs_emergency_read_only(struct bch_fs *); +bool bch2_fs_emergency_read_only_locked(struct bch_fs *); void bch2_fs_read_only(struct bch_fs *); int bch2_fs_read_write(struct bch_fs *); int bch2_fs_read_write_early(struct bch_fs *); -/* - * Only for use in the recovery/fsck path: - */ -static inline void bch2_fs_lazy_rw(struct bch_fs *c) -{ - if (!test_bit(BCH_FS_rw, &c->flags) && - !test_bit(BCH_FS_was_rw, &c->flags)) - bch2_fs_read_write_early(c); -} - void __bch2_fs_stop(struct bch_fs *); void bch2_fs_free(struct bch_fs *); void bch2_fs_stop(struct bch_fs *); diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c index 03e59f86f360..a7eb1f511484 100644 --- a/fs/bcachefs/sysfs.c +++ b/fs/bcachefs/sysfs.c @@ -146,7 +146,7 @@ write_attribute(trigger_journal_writes); write_attribute(trigger_btree_cache_shrink); write_attribute(trigger_btree_key_cache_shrink); write_attribute(trigger_freelist_wakeup); -rw_attribute(gc_gens_pos); +read_attribute(gc_gens_pos); read_attribute(uuid); read_attribute(minor); @@ -203,7 +203,6 @@ read_attribute(disk_groups); read_attribute(has_data); read_attribute(alloc_debug); -read_attribute(accounting); read_attribute(usage_base); #define x(t, n, ...) 
read_attribute(t); @@ -211,12 +210,11 @@ BCH_PERSISTENT_COUNTERS() #undef x rw_attribute(discard); +read_attribute(state); rw_attribute(label); -rw_attribute(copy_gc_enabled); read_attribute(copy_gc_wait); -rw_attribute(rebalance_enabled); sysfs_pd_controller_attribute(rebalance); read_attribute(rebalance_status); @@ -237,11 +235,6 @@ write_attribute(perf_test); BCH_TIME_STATS() #undef x -static struct attribute sysfs_state_rw = { - .name = "state", - .mode = 0444, -}; - static size_t bch2_btree_cache_size(struct bch_fs *c) { struct btree_cache *bc = &c->btree_cache; @@ -302,7 +295,8 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c) { - prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree)); + bch2_btree_id_to_text(out, c->gc_gens_btree); + prt_printf(out, ": "); bch2_bpos_to_text(out, c->gc_gens_pos); prt_printf(out, "\n"); } @@ -339,9 +333,6 @@ SHOW(bch2_fs) if (attr == &sysfs_gc_gens_pos) bch2_gc_gens_pos_to_text(out, c); - sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); - - sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled); sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */ if (attr == &sysfs_copy_gc_wait) @@ -405,9 +396,6 @@ SHOW(bch2_fs) if (attr == &sysfs_alloc_debug) bch2_fs_alloc_debug_to_text(out, c); - if (attr == &sysfs_accounting) - bch2_fs_accounting_to_text(out, c); - if (attr == &sysfs_usage_base) bch2_fs_usage_base_to_text(out, c); @@ -418,23 +406,6 @@ STORE(bch2_fs) { struct bch_fs *c = container_of(kobj, struct bch_fs, kobj); - if (attr == &sysfs_copy_gc_enabled) { - ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled) - ?: (ssize_t) size; - - if (c->copygc_thread) - wake_up_process(c->copygc_thread); - return ret; - } - - if (attr == &sysfs_rebalance_enabled) { - ssize_t ret = strtoul_safe(buf, c->rebalance.enabled) - ?: (ssize_t) size; - - rebalance_wakeup(c); - return ret; - } - sysfs_pd_controller_store(rebalance, &c->rebalance.pd); /* Debugging: */ @@ -534,15 +505,22 @@ SHOW(bch2_fs_counters) printbuf_tabstop_push(out, 32); - #define x(t, ...) \ + #define x(t, n, f, ...) \ if (attr == &sysfs_##t) { \ counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\ counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\ + if (f & TYPE_SECTORS) { \ + counter <<= 9; \ + counter_since_mount <<= 9; \ + } \ + \ prt_printf(out, "since mount:\t"); \ + (f & TYPE_COUNTER) ? prt_u64(out, counter_since_mount) :\ prt_human_readable_u64(out, counter_since_mount); \ prt_newline(out); \ \ prt_printf(out, "since filesystem creation:\t"); \ + (f & TYPE_COUNTER) ? 
prt_u64(out, counter) : \ prt_human_readable_u64(out, counter); \ prt_newline(out); \ } @@ -610,10 +588,8 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_gc_gens_pos, - &sysfs_copy_gc_enabled, &sysfs_copy_gc_wait, - &sysfs_rebalance_enabled, sysfs_pd_controller_files(rebalance), &sysfs_moving_ctxts, @@ -622,7 +598,6 @@ struct attribute *bch2_fs_internal_files[] = { &sysfs_disk_groups, &sysfs_alloc_debug, - &sysfs_accounting, &sysfs_usage_base, NULL }; @@ -682,6 +657,13 @@ STORE(bch2_fs_opts_dir) (id == Opt_compression && !c->opts.background_compression))) bch2_set_rebalance_needs_scan(c, 0); + if (v && id == Opt_rebalance_enabled) + rebalance_wakeup(c); + + if (v && id == Opt_copygc_enabled && + c->copygc_thread) + wake_up_process(c->copygc_thread); + ret = size; err: bch2_write_ref_put(c, BCH_WRITE_REF_sysfs); @@ -790,7 +772,7 @@ SHOW(bch2_dev) prt_char(out, '\n'); } - if (attr == &sysfs_state_rw) { + if (attr == &sysfs_state) { prt_string_option(out, bch2_member_states, ca->mi.state); prt_char(out, '\n'); } @@ -870,7 +852,7 @@ struct attribute *bch2_dev_files[] = { /* settings: */ &sysfs_discard, - &sysfs_state_rw, + &sysfs_state, &sysfs_label, &sysfs_has_data, diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c index fb5c1543e52f..6c6469814637 100644 --- a/fs/bcachefs/tests.c +++ b/fs/bcachefs/tests.c @@ -131,7 +131,7 @@ static int test_iterate(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs, + for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, ({ BUG_ON(k.k->p.offset != i++); @@ -186,7 +186,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, ({ BUG_ON(bkey_start_offset(k.k) != i); @@ -242,7 +242,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs, + for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, ({ BUG_ON(k.k->p.offset != i); @@ -259,7 +259,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs, + for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), BTREE_ITER_slots, k, ({ if (i >= nr * 2) @@ -302,7 +302,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, ({ BUG_ON(bkey_start_offset(k.k) != i + 8); @@ -320,7 +320,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) i = 0; ret = bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_extents, + for_each_btree_key_max(trans, iter, BTREE_ID_extents, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), BTREE_ITER_slots, k, ({ if (i == nr) @@ -349,10 +349,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr) bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), 0); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, 
POS(0, U64_MAX)))); + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); bch2_trans_iter_exit(trans, &iter); @@ -369,10 +369,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr) bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(0, 0, U32_MAX), 0); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k); bch2_trans_iter_exit(trans, &iter); @@ -488,7 +488,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) trans = bch2_trans_get(c); bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, SPOS(0, 0, snapid_lo), 0); - lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)))); BUG_ON(k.k->p.snapshot != U32_MAX); @@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos) bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos, BTREE_ITER_intent); - k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)); + k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX)); ret = bkey_err(k); if (ret) goto err; @@ -726,7 +726,7 @@ static int seq_insert(struct bch_fs *c, u64 nr) static int seq_lookup(struct bch_fs *c, u64 nr) { return bch2_trans_run(c, - for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs, + for_each_btree_key_max(trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 0, k, 0)); diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h index 5597b9d6297f..c1b51009edf6 100644 --- a/fs/bcachefs/trace.h +++ b/fs/bcachefs/trace.h @@ -199,6 +199,30 @@ DECLARE_EVENT_CLASS(bio, (unsigned long long)__entry->sector, __entry->nr_sector) ); +/* disk_accounting.c */ + +TRACE_EVENT(accounting_mem_insert, + TP_PROTO(struct bch_fs *c, const char *acc), + TP_ARGS(c, acc), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(unsigned, new_nr ) + __string(acc, acc ) + ), + + TP_fast_assign( + __entry->dev = c->dev; + __entry->new_nr = c->accounting.k.nr; + __assign_str(acc); + ), + + TP_printk("%d,%d entries %u added %s", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->new_nr, + __get_str(acc)) +); + /* fs.c: */ TRACE_EVENT(bch2_sync_fs, TP_PROTO(struct super_block *sb, int wait), @@ -703,7 +727,7 @@ DEFINE_EVENT(fs_str, bucket_alloc_fail, TP_ARGS(c, str) ); -TRACE_EVENT(discard_buckets, +DECLARE_EVENT_CLASS(discard_buckets_class, TP_PROTO(struct bch_fs *c, u64 seen, u64 open, u64 need_journal_commit, u64 discarded, const char *err), TP_ARGS(c, seen, open, need_journal_commit, discarded, err), @@ -735,6 +759,18 @@ TRACE_EVENT(discard_buckets, __entry->err) ); +DEFINE_EVENT(discard_buckets_class, discard_buckets, + TP_PROTO(struct bch_fs *c, u64 seen, u64 open, + u64 need_journal_commit, u64 discarded, const char *err), + TP_ARGS(c, seen, open, need_journal_commit, discarded, err) +); + +DEFINE_EVENT(discard_buckets_class, discard_buckets_fast, + TP_PROTO(struct bch_fs *c, u64 seen, u64 open, + u64 need_journal_commit, u64 discarded, const char *err), + TP_ARGS(c, seen, open, need_journal_commit, discarded, err) +); + TRACE_EVENT(bucket_invalidate, TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors), TP_ARGS(c, dev, 
bucket, sectors), @@ -848,8 +884,8 @@ TRACE_EVENT(move_data, TRACE_EVENT(evacuate_bucket, TP_PROTO(struct bch_fs *c, struct bpos *bucket, unsigned sectors, unsigned bucket_size, - u64 fragmentation, int ret), - TP_ARGS(c, bucket, sectors, bucket_size, fragmentation, ret), + int ret), + TP_ARGS(c, bucket, sectors, bucket_size, ret), TP_STRUCT__entry( __field(dev_t, dev ) @@ -857,7 +893,6 @@ TRACE_EVENT(evacuate_bucket, __field(u64, bucket ) __field(u32, sectors ) __field(u32, bucket_size ) - __field(u64, fragmentation ) __field(int, ret ) ), @@ -867,45 +902,42 @@ TRACE_EVENT(evacuate_bucket, __entry->bucket = bucket->offset; __entry->sectors = sectors; __entry->bucket_size = bucket_size; - __entry->fragmentation = fragmentation; __entry->ret = ret; ), - TP_printk("%d,%d %llu:%llu sectors %u/%u fragmentation %llu ret %i", + TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->member, __entry->bucket, __entry->sectors, __entry->bucket_size, - __entry->fragmentation, __entry->ret) + __entry->ret) ); TRACE_EVENT(copygc, TP_PROTO(struct bch_fs *c, - u64 sectors_moved, u64 sectors_not_moved, - u64 buckets_moved, u64 buckets_not_moved), - TP_ARGS(c, - sectors_moved, sectors_not_moved, - buckets_moved, buckets_not_moved), + u64 buckets, + u64 sectors_seen, + u64 sectors_moved), + TP_ARGS(c, buckets, sectors_seen, sectors_moved), TP_STRUCT__entry( __field(dev_t, dev ) + __field(u64, buckets ) + __field(u64, sectors_seen ) __field(u64, sectors_moved ) - __field(u64, sectors_not_moved ) - __field(u64, buckets_moved ) - __field(u64, buckets_not_moved ) ), TP_fast_assign( __entry->dev = c->dev; + __entry->buckets = buckets; + __entry->sectors_seen = sectors_seen; __entry->sectors_moved = sectors_moved; - __entry->sectors_not_moved = sectors_not_moved; - __entry->buckets_moved = buckets_moved; - __entry->buckets_not_moved = buckets_moved; ), - TP_printk("%d,%d sectors moved %llu remain %llu buckets moved %llu remain %llu", + TP_printk("%d,%d buckets %llu sectors seen %llu moved %llu", MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->sectors_moved, __entry->sectors_not_moved, - __entry->buckets_moved, __entry->buckets_not_moved) + __entry->buckets, + __entry->sectors_seen, + __entry->sectors_moved) ); TRACE_EVENT(copygc_wait, @@ -1316,6 +1348,12 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced, __entry->new_u64s) ); +DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush, + TP_PROTO(struct btree_trans *trans, + unsigned long caller_ip), + TP_ARGS(trans, caller_ip) +); + TRACE_EVENT(path_downgrade, TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, @@ -1352,10 +1390,21 @@ TRACE_EVENT(path_downgrade, __entry->pos_snapshot) ); -DEFINE_EVENT(transaction_event, trans_restart_write_buffer_flush, - TP_PROTO(struct btree_trans *trans, - unsigned long caller_ip), - TP_ARGS(trans, caller_ip) +TRACE_EVENT(key_cache_fill, + TP_PROTO(struct btree_trans *trans, const char *key), + TP_ARGS(trans, key), + + TP_STRUCT__entry( + __array(char, trans_fn, 32 ) + __string(key, key ) + ), + + TP_fast_assign( + strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); + __assign_str(key); + ), + + TP_printk("%s %s", __entry->trans_fn, __get_str(key)) ); TRACE_EVENT(write_buffer_flush, @@ -1414,6 +1463,24 @@ TRACE_EVENT(write_buffer_flush_slowpath, TP_printk("%zu/%zu", __entry->slowpath, __entry->total) ); +TRACE_EVENT(write_buffer_maybe_flush, + TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *key), + TP_ARGS(trans, 
caller_ip, key), + + TP_STRUCT__entry( + __array(char, trans_fn, 32 ) + __field(unsigned long, caller_ip ) + __string(key, key ) + ), + + TP_fast_assign( + strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn)); + __assign_str(key); + ), + + TP_printk("%s %pS %s", __entry->trans_fn, (void *) __entry->caller_ip, __get_str(key)) +); + DEFINE_EVENT(fs_str, rebalance_extent, TP_PROTO(struct bch_fs *c, const char *str), TP_ARGS(c, str) diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index fb02c1c36004..e7c3541b38f3 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -55,6 +55,16 @@ static inline size_t buf_pages(void *p, size_t len) PAGE_SIZE); } +static inline void *bch2_kvmalloc(size_t n, gfp_t flags) +{ + void *p = unlikely(n >= INT_MAX) + ? vmalloc(n) + : kvmalloc(n, flags & ~__GFP_ZERO); + if (p && (flags & __GFP_ZERO)) + memset(p, 0, n); + return p; +} + #define init_heap(heap, _size, gfp) \ ({ \ (heap)->nr = 0; \ @@ -317,6 +327,19 @@ do { \ _ptr ? container_of(_ptr, type, member) : NULL; \ }) +static inline struct list_head *list_pop(struct list_head *head) +{ + if (list_empty(head)) + return NULL; + + struct list_head *ret = head->next; + list_del_init(ret); + return ret; +} + +#define list_pop_entry(head, type, member) \ + container_of_or_null(list_pop(head), type, member) + /* Does linear interpolation between powers of two */ static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) { @@ -647,8 +670,6 @@ static inline int cmp_le32(__le32 l, __le32 r) #include <linux/uuid.h> -#define QSTR(n) { { { .len = strlen(n) } }, .name = n } - static inline bool qstr_eq(const struct qstr l, const struct qstr r) { return l.len == r.len && !memcmp(l.name, r.name, l.len); @@ -696,4 +717,13 @@ static inline bool test_bit_le64(size_t bit, __le64 *addr) return (addr[bit / 64] & cpu_to_le64(BIT_ULL(bit % 64))) != 0; } +static inline void memcpy_swab(void *_dst, void *_src, size_t len) +{ + u8 *dst = _dst + len; + u8 *src = _src; + + while (len--) + *--dst = *src++; +} + #endif /* _BCACHEFS_UTIL_H */ diff --git a/fs/bcachefs/varint.c b/fs/bcachefs/varint.c index 6a78553d9b0c..6620ecae26af 100644 --- a/fs/bcachefs/varint.c +++ b/fs/bcachefs/varint.c @@ -9,6 +9,7 @@ #include <valgrind/memcheck.h> #endif +#include "errcode.h" #include "varint.h" /** @@ -53,7 +54,7 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out) u64 v; if (unlikely(in + bytes > end)) - return -1; + return -BCH_ERR_varint_decode_error; if (likely(bytes < 9)) { __le64 v_le = 0; @@ -115,7 +116,7 @@ int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out) unsigned bytes = ffz(*in) + 1; if (unlikely(in + bytes > end)) - return -1; + return -BCH_ERR_varint_decode_error; if (likely(bytes < 9)) { v >>= bytes; diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c index 952aca400faf..aed7c6984173 100644 --- a/fs/bcachefs/xattr.c +++ b/fs/bcachefs/xattr.c @@ -71,7 +71,7 @@ const struct bch_hash_desc bch2_xattr_hash_desc = { }; int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k, - enum bch_validate_flags flags) + struct bkey_validate_context from) { struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k); unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len, @@ -309,7 +309,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size) u64 offset = 0, inum = inode->ei_inode.bi_inum; int ret = bch2_trans_run(c, - for_each_btree_key_in_subvolume_upto(trans, iter, BTREE_ID_xattrs, + for_each_btree_key_in_subvolume_max(trans, iter, BTREE_ID_xattrs, POS(inum, offset), 
POS(inum, U64_MAX), inode->ei_inum.subvol, 0, k, ({ @@ -565,13 +565,6 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler, ret = bch2_write_inode(c, inode, inode_opt_set_fn, &s, 0); err: mutex_unlock(&inode->ei_update_lock); - - if (value && - (opt_id == Opt_background_target || - opt_id == Opt_background_compression || - (opt_id == Opt_compression && !inode_opt_get(c, &inode->ei_inode, background_compression)))) - bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum); - err_class_exit: return bch2_err_class(ret); } @@ -609,7 +602,7 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = { #endif /* NO_BCACHEFS_FS */ -const struct xattr_handler *bch2_xattr_handlers[] = { +const struct xattr_handler * const bch2_xattr_handlers[] = { &bch_xattr_user_handler, &bch_xattr_trusted_handler, &bch_xattr_security_handler, diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h index c188a5ad64ce..132fbbd15a66 100644 --- a/fs/bcachefs/xattr.h +++ b/fs/bcachefs/xattr.h @@ -6,7 +6,8 @@ extern const struct bch_hash_desc bch2_xattr_hash_desc; -int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags); +int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c, + struct bkey_validate_context); void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); #define bch2_bkey_ops_xattr ((struct bkey_ops) { \ @@ -44,6 +45,6 @@ int bch2_xattr_set(struct btree_trans *, subvol_inum, ssize_t bch2_xattr_list(struct dentry *, char *, size_t); -extern const struct xattr_handler *bch2_xattr_handlers[]; +extern const struct xattr_handler * const bch2_xattr_handlers[]; #endif /* _BCACHEFS_XATTR_H */ diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 106f0e8af177..8054f44d39cf 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1257,7 +1257,7 @@ out_free_interp: } reloc_func_desc = interp_load_addr; - allow_write_access(interpreter); + exe_file_allow_write_access(interpreter); fput(interpreter); kfree(interp_elf_ex); @@ -1354,7 +1354,7 @@ out_free_dentry: kfree(interp_elf_ex); kfree(interp_elf_phdata); out_free_file: - allow_write_access(interpreter); + exe_file_allow_write_access(interpreter); if (interpreter) fput(interpreter); out_free_ph: diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index f1a7c4875c4a..c13ee8180b17 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -394,7 +394,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) goto error; } - allow_write_access(interpreter); + exe_file_allow_write_access(interpreter); fput(interpreter); interpreter = NULL; } @@ -467,7 +467,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm) error: if (interpreter) { - allow_write_access(interpreter); + exe_file_allow_write_access(interpreter); fput(interpreter); } kfree(interpreter_name); diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 390808ce935d..b5b5ca1a44f7 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -478,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm, * 28 bits (256 MB) is way more than reasonable in this case. * If some top bits are set we have probable binary corruption. 
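Continuing just below, load_flat_file() enforces this limit by OR-ing every header size field together and shifting the result right by 28 bits, so one branch catches any field with a high bit set; the hunk simply adds relocs to that OR. A stand-alone user-space illustration of the idiom (the function name and sample values here are hypothetical, not the kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Nonzero if any field has a bit set at or above bit 28, i.e. if any
	 * single field is >= 256 MB. OR-ing first lets one test cover them all. */
	static int header_fields_look_corrupt(uint32_t text_len, uint32_t data_len,
					      uint32_t bss_len, uint32_t stack_len,
					      uint32_t relocs, uint32_t full_data)
	{
		return (text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28;
	}

	int main(void)
	{
		/* Plausible header: everything well under 256 MB. */
		printf("sane header: %d\n",
		       header_fields_look_corrupt(0x1000, 0x2000, 0x800, 0x4000, 32, 0x3000));
		/* One field with garbage in its top bits, as a corrupted header might have. */
		printf("corrupt header: %d\n",
		       header_fields_look_corrupt(0x1000, 0xdead0000u, 0x800, 0x4000, 32, 0x3000));
		return 0;
	}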
*/ - if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) { + if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) { pr_err("bad header\n"); ret = -ENOEXEC; goto err; diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 6a3a16f91051..5a7ebd160724 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c @@ -1001,7 +1001,7 @@ static int bm_fill_super(struct super_block *sb, struct fs_context *fc) /* * If it turns out that most user namespaces actually want to * register their own binary type handler and therefore all - * create their own separate binfm_misc mounts we should + * create their own separate binfmt_misc mounts we should * consider turning this into a kmem cache. */ misc = kzalloc(sizeof(struct binfmt_misc), GFP_KERNEL); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d8ded597edad..b2fae67f8fa3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -628,7 +628,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array, for (allocated = 0; allocated < nr_pages;) { unsigned int last = allocated; - allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array); + allocated = alloc_pages_bulk(gfp, nr_pages, page_array); if (unlikely(allocated == last)) { /* No progress, fail and do cleanup. */ for (int i = 0; i < allocated; i++) { diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fe2c810335ff..a9322601ab5c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -10173,7 +10173,6 @@ out_unlock_mmap: *span = bsi.highest_ppage - bsi.lowest_ppage + 1; sis->max = bsi.nr_pages; sis->pages = bsi.nr_pages - 1; - sis->highest_bit = bsi.nr_pages - 1; return bsi.nr_extents; } #else diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index ae98269a5e3a..6c18bad53cd3 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2544,6 +2544,15 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) goto out; } + /* + * Don't allow defrag on pre-content watched files, as it could + * populate the page cache with 0's via readahead. 
+ */ + if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { + ret = -EINVAL; + goto out; + } + if (argp) { if (copy_from_user(&range, argp, sizeof(range))) { ret = -EFAULT; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index f809c3200c21..dc4fee519ca6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -961,7 +961,7 @@ static int btrfs_fill_super(struct super_block *sb, #endif sb->s_xattr = btrfs_xattr_handlers; sb->s_time_gran = 1; - sb->s_iflags |= SB_I_CGROUPWB; + sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; err = super_setup_bdi(sb); if (err) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index a594f66daedf..0a0776489055 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -798,6 +798,10 @@ static int get_canonical_dev_path(const char *dev_path, char *canonical) if (ret) goto out; resolved_path = d_path(&path, path_buf, PATH_MAX); + if (IS_ERR(resolved_path)) { + ret = PTR_ERR(resolved_path); + goto out; + } ret = strscpy(canonical, resolved_path, PATH_MAX); out: kfree(path_buf); diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c index 1715d5ca2b2d..e341ade47dd8 100644 --- a/fs/cachefiles/error_inject.c +++ b/fs/cachefiles/error_inject.c @@ -11,7 +11,7 @@ unsigned int cachefiles_error_injection_state; static struct ctl_table_header *cachefiles_sysctl; -static struct ctl_table cachefiles_sysctls[] = { +static const struct ctl_table cachefiles_sysctls[] = { { .procname = "error_injection", .data = &cachefiles_error_injection_state, diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 6a821a959b59..92058ae43488 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -13,6 +13,7 @@ #include <linux/falloc.h> #include <linux/sched/mm.h> #include <trace/events/fscache.h> +#include <trace/events/netfs.h> #include "internal.h" struct cachefiles_kiocb { @@ -366,6 +367,7 @@ static int cachefiles_write(struct netfs_cache_resources *cres, if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE)) { if (term_func) term_func(term_func_priv, -ENOBUFS, false); + trace_netfs_sreq(term_func_priv, netfs_sreq_trace_cache_nowrite); return -ENOBUFS; } @@ -695,6 +697,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq) iov_iter_truncate(&subreq->io_iter, len); } + trace_netfs_sreq(subreq, netfs_sreq_trace_cache_prepare); cachefiles_begin_secure(cache, &saved_cred); ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres), &start, &len, len, true); @@ -704,6 +707,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq) return; } + trace_netfs_sreq(subreq, netfs_sreq_trace_cache_write); cachefiles_write(&subreq->rreq->cache_resources, subreq->start, &subreq->io_iter, netfs_write_subrequest_terminated, subreq); diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c index 7c6f260a3be5..52383b1d0ba6 100644 --- a/fs/cachefiles/xattr.c +++ b/fs/cachefiles/xattr.c @@ -77,6 +77,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object) trace_cachefiles_vfs_error(object, file_inode(file), ret, cachefiles_trace_setxattr_error); trace_cachefiles_coherency(object, file_inode(file)->i_ino, + be64_to_cpup((__be64 *)buf->data), buf->content, cachefiles_coherency_set_fail); if (ret != -ENOMEM) @@ -85,6 +86,7 @@ int cachefiles_set_object_xattr(struct cachefiles_object *object) "Failed to set xattr with error %d", ret); } else { trace_cachefiles_coherency(object, file_inode(file)->i_ino, + be64_to_cpup((__be64 *)buf->data), buf->content, cachefiles_coherency_set_ok); } @@ -126,7 +128,10 @@ int 
cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file object, "Failed to read aux with error %zd", xlen); why = cachefiles_coherency_check_xattr; - } else if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) { + goto out; + } + + if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) { why = cachefiles_coherency_check_type; } else if (memcmp(buf->data, p, len) != 0) { why = cachefiles_coherency_check_aux; @@ -141,7 +146,9 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file ret = 0; } +out: trace_cachefiles_coherency(object, file_inode(file)->i_ino, + be64_to_cpup((__be64 *)buf->data), buf->content, why); kfree(buf); return ret; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 85936f6d2bf7..f5224a566b69 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -223,10 +223,13 @@ static void finish_netfs_read(struct ceph_osd_request *req) subreq->len, i_size_read(req->r_inode)); /* no object means success but no data */ - if (err == -ENOENT) + if (err == -ENOENT) { + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); err = 0; - else if (err == -EBLOCKLISTED) + } else if (err == -EBLOCKLISTED) { fsc->blocklisted = true; + } if (err >= 0) { if (sparse && err > 0) @@ -242,6 +245,8 @@ static void finish_netfs_read(struct ceph_osd_request *req) if (err > subreq->len) err = subreq->len; } + if (err > 0) + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); } if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { @@ -253,8 +258,9 @@ static void finish_netfs_read(struct ceph_osd_request *req) subreq->transferred = err; err = 0; } + subreq->error = err; trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress); - netfs_read_subreq_terminated(subreq, err, false); + netfs_read_subreq_terminated(subreq); iput(req->r_inode); ceph_dec_osd_stopping_blocker(fsc->mdsc); } @@ -314,7 +320,9 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq) ceph_mdsc_put_request(req); out: - netfs_read_subreq_terminated(subreq, err, false); + subreq->error = err; + trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress); + netfs_read_subreq_terminated(subreq); return true; } @@ -426,8 +434,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq) ceph_osdc_start_request(req->r_osdc, req); out: ceph_osdc_put_request(req); - if (err) - netfs_read_subreq_terminated(subreq, err, false); + if (err) { + subreq->error = err; + netfs_read_subreq_terminated(subreq); + } doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err); } diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index fdf9dc15eafa..fdd404fc8112 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -412,7 +412,7 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc) void ceph_fs_debugfs_init(struct ceph_fs_client *fsc) { - char name[100]; + char name[NAME_MAX]; doutc(fsc->client, "begin\n"); fsc->debugfs_congestion_kb = diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bf388e07a02..62e99e65250d 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -1940,29 +1940,19 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry, /* * Check if cached dentry can be trusted. 
*/ -static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) +static int ceph_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc; struct ceph_client *cl = mdsc->fsc->client; int valid = 0; - struct dentry *parent; - struct inode *dir, *inode; + struct inode *inode; - valid = fscrypt_d_revalidate(dentry, flags); + valid = fscrypt_d_revalidate(dir, name, dentry, flags); if (valid <= 0) return valid; - if (flags & LOOKUP_RCU) { - parent = READ_ONCE(dentry->d_parent); - dir = d_inode_rcu(parent); - if (!dir) - return -ECHILD; - inode = d_inode_rcu(dentry); - } else { - parent = dget_parent(dentry); - dir = d_inode(parent); - inode = d_inode(dentry); - } + inode = d_inode_rcu(dentry); doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n", dentry, dentry, inode, ceph_dentry(dentry)->offset, @@ -2008,6 +1998,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) req->r_parent = dir; ihold(dir); + req->r_dname = name; + mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED; if (ceph_security_xattr_wanted(dir)) mask |= CEPH_CAP_XATTR_SHARED; @@ -2038,9 +2030,6 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid"); if (!valid) ceph_dir_clear_complete(dir); - - if (!(flags & LOOKUP_RCU)) - dput(parent); return valid; } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 785fe489ef4b..54b3421501e9 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2621,6 +2621,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen) { struct inode *dir = req->r_parent; struct dentry *dentry = req->r_dentry; + const struct qstr *name = req->r_dname; u8 *cryptbuf = NULL; u32 len = 0; int ret = 0; @@ -2641,8 +2642,10 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen) if (!fscrypt_has_encryption_key(dir)) goto success; - if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX, - &len)) { + if (!name) + name = &dentry->d_name; + + if (!fscrypt_fname_encrypted_size(dir, name->len, NAME_MAX, &len)) { WARN_ON_ONCE(1); return ERR_PTR(-ENAMETOOLONG); } @@ -2657,7 +2660,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen) if (!cryptbuf) return ERR_PTR(-ENOMEM); - ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len); + ret = fscrypt_fname_encrypt(dir, name, cryptbuf, len); if (ret) { kfree(cryptbuf); return ERR_PTR(ret); @@ -2945,12 +2948,12 @@ static struct ceph_mds_request_head_legacy * find_legacy_request_head(void *p, u64 features) { bool legacy = !(features & CEPH_FEATURE_FS_BTIME); - struct ceph_mds_request_head_old *ohead; + struct ceph_mds_request_head *head; if (legacy) return (struct ceph_mds_request_head_legacy *)p; - ohead = (struct ceph_mds_request_head_old *)p; - return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid; + head = (struct ceph_mds_request_head *)p; + return (struct ceph_mds_request_head_legacy *)&head->oldest_client_tid; } /* @@ -3020,7 +3023,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, if (legacy) len = sizeof(struct ceph_mds_request_head_legacy); else if (request_head_version == 1) - len = sizeof(struct ceph_mds_request_head_old); + len = offsetofend(struct ceph_mds_request_head, args); else if (request_head_version == 2) len = offsetofend(struct 
ceph_mds_request_head, ext_num_fwd); else @@ -3104,11 +3107,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session, msg->hdr.version = cpu_to_le16(3); p = msg->front.iov_base + sizeof(*lhead); } else if (request_head_version == 1) { - struct ceph_mds_request_head_old *ohead = msg->front.iov_base; + struct ceph_mds_request_head *nhead = msg->front.iov_base; msg->hdr.version = cpu_to_le16(4); - ohead->version = cpu_to_le16(1); - p = msg->front.iov_base + sizeof(*ohead); + nhead->version = cpu_to_le16(1); + p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, args); } else if (request_head_version == 2) { struct ceph_mds_request_head *nhead = msg->front.iov_base; @@ -3265,7 +3268,7 @@ static int __prepare_send_request(struct ceph_mds_session *session, * so we limit to retry at most 256 times. */ if (req->r_attempts) { - old_max_retry = sizeof_field(struct ceph_mds_request_head_old, + old_max_retry = sizeof_field(struct ceph_mds_request_head, num_retry); old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE); if ((old_version && req->r_attempts >= old_max_retry) || @@ -5690,18 +5693,18 @@ static int ceph_mds_auth_match(struct ceph_mds_client *mdsc, * * All the other cases --> mismatch */ + bool path_matched = true; char *first = strstr(_tpath, auth->match.path); - if (first != _tpath) { - if (free_tpath) - kfree(_tpath); - return 0; + if (first != _tpath || + (tlen > len && _tpath[len] != '/')) { + path_matched = false; } - if (tlen > len && _tpath[len] != '/') { - if (free_tpath) - kfree(_tpath); + if (free_tpath) + kfree(_tpath); + + if (!path_matched) return 0; - } } } diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 38bb7e0d2d79..7c9fee9e80d4 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -299,6 +299,8 @@ struct ceph_mds_request { struct inode *r_target_inode; /* resulting inode */ struct inode *r_new_inode; /* new inode (for creates) */ + const struct qstr *r_dname; /* stable name (for ->d_revalidate) */ + #define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */ #define CEPH_MDS_R_ABORTED (2) /* call was aborted */ #define CEPH_MDS_R_GOT_UNSAFE (3) /* got an unsafe reply */ diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 06ee397e0c3a..d90eda19bcc4 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, if (IS_ERR(in)) { doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino, PTR_ERR(in)); - qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ + qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */ } else { qri->timeout = 0; qri->inode = in; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index af14ec382246..7fa1e7be50e4 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -1132,8 +1132,7 @@ struct ceph_acl_sec_ctx { void *acl; #endif #ifdef CONFIG_CEPH_FS_SECURITY_LABEL - void *sec_ctx; - u32 sec_ctxlen; + struct lsm_context lsmctx; #endif #ifdef CONFIG_FS_ENCRYPTION struct ceph_fscrypt_auth *fscrypt_auth; diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 1a9f12204666..537165db4519 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -1383,8 +1383,7 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode, int err; err = security_dentry_init_security(dentry, mode, &dentry->d_name, - &name, &as_ctx->sec_ctx, - &as_ctx->sec_ctxlen); + &name, &as_ctx->lsmctx); if (err < 0) { WARN_ON_ONCE(err != -EOPNOTSUPP); err = 0; /* do nothing */ @@ -1409,7 +1408,7 @@ int 
ceph_security_init_secctx(struct dentry *dentry, umode_t mode, */ name_len = strlen(name); err = ceph_pagelist_reserve(pagelist, - 4 * 2 + name_len + as_ctx->sec_ctxlen); + 4 * 2 + name_len + as_ctx->lsmctx.len); if (err) goto out; @@ -1432,8 +1431,9 @@ int ceph_security_init_secctx(struct dentry *dentry, umode_t mode, ceph_pagelist_encode_32(pagelist, name_len); ceph_pagelist_append(pagelist, name, name_len); - ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen); - ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen); + ceph_pagelist_encode_32(pagelist, as_ctx->lsmctx.len); + ceph_pagelist_append(pagelist, as_ctx->lsmctx.context, + as_ctx->lsmctx.len); err = 0; out: @@ -1451,7 +1451,7 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx) posix_acl_release(as_ctx->default_acl); #endif #ifdef CONFIG_CEPH_FS_SECURITY_LABEL - security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen); + security_release_secctx(&as_ctx->lsmctx); #endif #ifdef CONFIG_FS_ENCRYPTION kfree(as_ctx->fscrypt_auth); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 4e552ba7bd43..a3e2dfeedfbf 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -445,7 +445,8 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) } /* called when a cache lookup succeeds */ -static int coda_dentry_revalidate(struct dentry *de, unsigned int flags) +static int coda_dentry_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *de, unsigned int flags) { struct inode *inode; struct coda_inode_info *cii; diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c index 9f2d5743e2c8..0df46f09b6cc 100644 --- a/fs/coda/sysctl.c +++ b/fs/coda/sysctl.c @@ -14,7 +14,7 @@ static struct ctl_table_header *fs_table_header; -static struct ctl_table coda_table[] = { +static const struct ctl_table coda_table[] = { { .procname = "timeout", .data = &coda_timeout, diff --git a/fs/coredump.c b/fs/coredump.c index d48edb37bc35..591700e1b2ce 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -995,7 +995,7 @@ static int proc_dostring_coredump(const struct ctl_table *table, int write, static const unsigned int core_file_note_size_min = CORE_FILE_NOTE_SIZE_DEFAULT; static const unsigned int core_file_note_size_max = CORE_FILE_NOTE_SIZE_MAX; -static struct ctl_table coredump_sysctls[] = { +static const struct ctl_table coredump_sysctls[] = { { .procname = "core_uses_pid", .data = &core_uses_pid, diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 0ad52fbe51c9..010f9c0a4c2f 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -574,11 +574,10 @@ EXPORT_SYMBOL_GPL(fscrypt_fname_siphash); * Validate dentries in encrypted directories to make sure we aren't potentially * caching stale dentries after a key has been added. */ -int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) +int fscrypt_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - struct dentry *dir; int err; - int valid; /* * Plaintext names are always valid, since fscrypt doesn't support @@ -591,30 +590,21 @@ int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) /* * No-key name; valid if the directory's key is still unavailable. * - * Although fscrypt forbids rename() on no-key names, we still must use - * dget_parent() here rather than use ->d_parent directly. 
That's - * because a corrupted fs image may contain directory hard links, which - * the VFS handles by moving the directory's dentry tree in the dcache - * each time ->lookup() finds the directory and it already has a dentry - * elsewhere. Thus ->d_parent can be changing, and we must safely grab - * a reference to some ->d_parent to prevent it from being freed. + * Note in RCU mode we have to bail if we get here - + * fscrypt_get_encryption_info() may block. */ if (flags & LOOKUP_RCU) return -ECHILD; - dir = dget_parent(dentry); /* * Pass allow_unsupported=true, so that files with an unsupported * encryption policy can be deleted. */ - err = fscrypt_get_encryption_info(d_inode(dir), true); - valid = !fscrypt_has_encryption_key(d_inode(dir)); - dput(dir); - + err = fscrypt_get_encryption_info(dir, true); if (err < 0) return err; - return valid; + return !fscrypt_has_encryption_key(dir); } EXPORT_SYMBOL_GPL(fscrypt_d_revalidate); diff --git a/fs/dcache.c b/fs/dcache.c index b4d5e9e1e43d..e3634916ffb9 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -192,7 +192,7 @@ static int proc_nr_dentry(const struct ctl_table *table, int write, void *buffer return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_dcache_sysctls[] = { +static const struct ctl_table fs_dcache_sysctls[] = { { .procname = "dentry-state", .data = &dentry_stat, @@ -295,12 +295,16 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c return dentry_string_cmp(cs, ct, tcount); } +/* + * long names are allocated separately from dentry and never modified. + * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot() + * for the reason why ->count and ->head can't be combined into a union. + * dentry_string_cmp() relies upon ->name[] being word-aligned. 
+ */ struct external_name { - union { - atomic_t count; - struct rcu_head head; - } u; - unsigned char name[]; + atomic_t count; + struct rcu_head head; + unsigned char name[] __aligned(sizeof(unsigned long)); }; static inline struct external_name *external_name(struct dentry *dentry) @@ -324,31 +328,45 @@ static void __d_free_external(struct rcu_head *head) static inline int dname_external(const struct dentry *dentry) { - return dentry->d_name.name != dentry->d_iname; + return dentry->d_name.name != dentry->d_shortname.string; } void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry) { - spin_lock(&dentry->d_lock); - name->name = dentry->d_name; - if (unlikely(dname_external(dentry))) { - atomic_inc(&external_name(dentry)->u.count); + unsigned seq; + const unsigned char *s; + + rcu_read_lock(); +retry: + seq = read_seqcount_begin(&dentry->d_seq); + s = READ_ONCE(dentry->d_name.name); + name->name.hash_len = dentry->d_name.hash_len; + name->name.name = name->inline_name.string; + if (likely(s == dentry->d_shortname.string)) { + name->inline_name = dentry->d_shortname; } else { - memcpy(name->inline_name, dentry->d_iname, - dentry->d_name.len + 1); - name->name.name = name->inline_name; + struct external_name *p; + p = container_of(s, struct external_name, name[0]); + // get a valid reference + if (unlikely(!atomic_inc_not_zero(&p->count))) + goto retry; + name->name.name = s; } - spin_unlock(&dentry->d_lock); + if (read_seqcount_retry(&dentry->d_seq, seq)) { + release_dentry_name_snapshot(name); + goto retry; + } + rcu_read_unlock(); } EXPORT_SYMBOL(take_dentry_name_snapshot); void release_dentry_name_snapshot(struct name_snapshot *name) { - if (unlikely(name->name.name != name->inline_name)) { + if (unlikely(name->name.name != name->inline_name.string)) { struct external_name *p; p = container_of(name->name.name, struct external_name, name[0]); - if (unlikely(atomic_dec_and_test(&p->u.count))) - kfree_rcu(p, u.head); + if (unlikely(atomic_dec_and_test(&p->count))) + kfree_rcu(p, head); } } EXPORT_SYMBOL(release_dentry_name_snapshot); @@ -386,7 +404,7 @@ static void dentry_free(struct dentry *dentry) WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); if (unlikely(dname_external(dentry))) { struct external_name *p = external_name(dentry); - if (likely(atomic_dec_and_test(&p->u.count))) { + if (likely(atomic_dec_and_test(&p->count))) { call_rcu(&dentry->d_u.d_rcu, __d_free_external); return; } @@ -1654,10 +1672,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) * will still always have a NUL at the end, even if we might * be overwriting an internal NUL character */ - dentry->d_iname[DNAME_INLINE_LEN-1] = 0; + dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0; if (unlikely(!name)) { name = &slash_name; - dname = dentry->d_iname; + dname = dentry->d_shortname.string; } else if (name->len > DNAME_INLINE_LEN-1) { size_t size = offsetof(struct external_name, name[1]); struct external_name *p = kmalloc(size + name->len, @@ -1667,10 +1685,10 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) kmem_cache_free(dentry_cache, dentry); return NULL; } - atomic_set(&p->u.count, 1); + atomic_set(&p->count, 1); dname = p->name; } else { - dname = dentry->d_iname; + dname = dentry->d_shortname.string; } dentry->d_name.len = name->len; @@ -1681,9 +1699,8 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) /* Make sure we always see the terminating NUL character */ 
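Earlier in this dcache.c hunk, take_dentry_name_snapshot() stops taking d_lock: it samples the name under the d_seq seqcount and pins an external name with atomic_inc_not_zero(), retrying if either step loses a race. A user-space analogue of the "take a reference only if the object is still live" half of that pattern, using C11 atomics; struct refname, refname_tryget() and refname_put() are illustrative names, not the kernel's:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct refname {
		atomic_int count;	/* 0 means the final reference is gone */
		char name[32];
	};

	/* Pin the object only if it still has at least one reference, mirroring
	 * atomic_inc_not_zero(): loop on compare-exchange so a count that has
	 * already reached zero is never resurrected. */
	static bool refname_tryget(struct refname *p)
	{
		int old = atomic_load(&p->count);

		do {
			if (old == 0)
				return false;	/* already on its way to being freed */
		} while (!atomic_compare_exchange_weak(&p->count, &old, old + 1));

		return true;
	}

	/* Returns true when the caller dropped the last reference and should
	 * free the object (in the kernel, freeing would be RCU-deferred). */
	static bool refname_put(struct refname *p)
	{
		return atomic_fetch_sub(&p->count, 1) == 1;
	}

	int main(void)
	{
		struct refname n = { .count = 1, .name = "example" };

		printf("tryget live: %d\n", refname_tryget(&n));	/* 1 */
		refname_put(&n);
		refname_put(&n);					/* count now 0 */
		printf("tryget dead: %d\n", refname_tryget(&n));	/* 0 */
		return 0;
	}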
smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ - dentry->d_lockref.count = 1; dentry->d_flags = 0; - spin_lock_init(&dentry->d_lock); + lockref_init(&dentry->d_lockref); seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); dentry->d_inode = NULL; dentry->d_parent = dentry; @@ -2729,10 +2746,9 @@ static void swap_names(struct dentry *dentry, struct dentry *target) * dentry:internal, target:external. Steal target's * storage and make target internal. */ - memcpy(target->d_iname, dentry->d_name.name, - dentry->d_name.len + 1); dentry->d_name.name = target->d_name.name; - target->d_name.name = target->d_iname; + target->d_shortname = dentry->d_shortname; + target->d_name.name = target->d_shortname.string; } } else { if (unlikely(dname_external(dentry))) { @@ -2740,20 +2756,16 @@ static void swap_names(struct dentry *dentry, struct dentry *target) * dentry:external, target:internal. Give dentry's * storage to target and make dentry internal */ - memcpy(dentry->d_iname, target->d_name.name, - target->d_name.len + 1); target->d_name.name = dentry->d_name.name; - dentry->d_name.name = dentry->d_iname; + dentry->d_shortname = target->d_shortname; + dentry->d_name.name = dentry->d_shortname.string; } else { /* * Both are internal. */ - unsigned int i; - BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long))); - for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) { - swap(((long *) &dentry->d_iname)[i], - ((long *) &target->d_iname)[i]); - } + for (int i = 0; i < DNAME_INLINE_WORDS; i++) + swap(dentry->d_shortname.words[i], + target->d_shortname.words[i]); } } swap(dentry->d_name.hash_len, target->d_name.hash_len); @@ -2765,16 +2777,15 @@ static void copy_name(struct dentry *dentry, struct dentry *target) if (unlikely(dname_external(dentry))) old_name = external_name(dentry); if (unlikely(dname_external(target))) { - atomic_inc(&external_name(target)->u.count); + atomic_inc(&external_name(target)->count); dentry->d_name = target->d_name; } else { - memcpy(dentry->d_iname, target->d_name.name, - target->d_name.len + 1); - dentry->d_name.name = dentry->d_iname; + dentry->d_shortname = target->d_shortname; + dentry->d_name.name = dentry->d_shortname.string; dentry->d_name.hash_len = target->d_name.hash_len; } - if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) - kfree_rcu(old_name, u.head); + if (old_name && likely(atomic_dec_and_test(&old_name->count))) + kfree_rcu(old_name, head); } /* @@ -2955,7 +2966,12 @@ static int __d_unalias(struct dentry *dentry, struct dentry *alias) goto out_err; m2 = &alias->d_parent->d_inode->i_rwsem; out_unalias: + if (alias->d_op && alias->d_op->d_unalias_trylock && + !alias->d_op->d_unalias_trylock(alias)) + goto out_err; __d_move(alias, dentry, false); + if (alias->d_op && alias->d_op->d_unalias_unlock) + alias->d_op->d_unalias_unlock(alias); ret = 0; out_err: if (m2) @@ -3103,12 +3119,12 @@ void d_mark_tmpfile(struct file *file, struct inode *inode) { struct dentry *dentry = file->f_path.dentry; - BUG_ON(dentry->d_name.name != dentry->d_iname || + BUG_ON(dname_external(dentry) || !hlist_unhashed(&dentry->d_u.d_alias) || !d_unlinked(dentry)); spin_lock(&dentry->d_parent->d_lock); spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", + dentry->d_name.len = sprintf(dentry->d_shortname.string, "#%llu", (unsigned long long)inode->i_ino); spin_unlock(&dentry->d_lock); spin_unlock(&dentry->d_parent->d_lock); @@ -3196,7 +3212,7 @@ static void __init dcache_init(void) */ dentry_cache = 
KMEM_CACHE_USERCOPY(dentry, SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT, - d_iname); + d_shortname.string); /* Hash may have been set up in dcache_init_early */ if (!hashdist) diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 16e198a26339..69e9ddcb113d 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -47,11 +47,17 @@ const struct file_operations debugfs_noop_file_operations = { #define F_DENTRY(filp) ((filp)->f_path.dentry) +const void *debugfs_get_aux(const struct file *file) +{ + return DEBUGFS_I(file_inode(file))->aux; +} +EXPORT_SYMBOL_GPL(debugfs_get_aux); + const struct file_operations *debugfs_real_fops(const struct file *filp) { struct debugfs_fsdata *fsd = F_DENTRY(filp)->d_fsdata; - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) { + if (!fsd) { /* * Urgh, we've been called w/o a protecting * debugfs_file_get(). @@ -84,9 +90,12 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode) return -EINVAL; d_fsd = READ_ONCE(dentry->d_fsdata); - if (!((unsigned long)d_fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) { + if (d_fsd) { fsd = d_fsd; } else { + struct inode *inode = dentry->d_inode; + unsigned int methods = 0; + if (WARN_ON(mode == DBGFS_GET_ALREADY)) return -EINVAL; @@ -95,23 +104,41 @@ static int __debugfs_file_get(struct dentry *dentry, enum dbgfs_get_mode mode) return -ENOMEM; if (mode == DBGFS_GET_SHORT) { + const struct debugfs_short_fops *ops; + ops = fsd->short_fops = DEBUGFS_I(inode)->short_fops; + if (ops->llseek) + methods |= HAS_LSEEK; + if (ops->read) + methods |= HAS_READ; + if (ops->write) + methods |= HAS_WRITE; fsd->real_fops = NULL; - fsd->short_fops = (void *)((unsigned long)d_fsd & - ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); } else { - fsd->real_fops = (void *)((unsigned long)d_fsd & - ~DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); + const struct file_operations *ops; + ops = fsd->real_fops = DEBUGFS_I(inode)->real_fops; + if (ops->llseek) + methods |= HAS_LSEEK; + if (ops->read) + methods |= HAS_READ; + if (ops->write) + methods |= HAS_WRITE; + if (ops->unlocked_ioctl) + methods |= HAS_IOCTL; + if (ops->poll) + methods |= HAS_POLL; fsd->short_fops = NULL; } + fsd->methods = methods; refcount_set(&fsd->active_users, 1); init_completion(&fsd->active_users_drained); INIT_LIST_HEAD(&fsd->cancellations); mutex_init(&fsd->cancellations_mtx); - if (cmpxchg(&dentry->d_fsdata, d_fsd, fsd) != d_fsd) { + d_fsd = cmpxchg(&dentry->d_fsdata, NULL, fsd); + if (d_fsd) { mutex_destroy(&fsd->cancellations_mtx); kfree(fsd); - fsd = READ_ONCE(dentry->d_fsdata); + fsd = d_fsd; } } @@ -208,8 +235,7 @@ void debugfs_enter_cancellation(struct file *file, return; fsd = READ_ONCE(dentry->d_fsdata); - if (WARN_ON(!fsd || - ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + if (WARN_ON(!fsd)) return; mutex_lock(&fsd->cancellations_mtx); @@ -240,8 +266,7 @@ void debugfs_leave_cancellation(struct file *file, return; fsd = READ_ONCE(dentry->d_fsdata); - if (WARN_ON(!fsd || - ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT))) + if (WARN_ON(!fsd)) return; mutex_lock(&fsd->cancellations_mtx); @@ -322,13 +347,16 @@ const struct file_operations debugfs_open_proxy_file_operations = { #define PROTO(args...) args #define ARGS(args...) 
args -#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \ +#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args, bit, ret) \ static ret_type full_proxy_ ## name(proto) \ { \ - struct dentry *dentry = F_DENTRY(filp); \ + struct dentry *dentry = F_DENTRY(filp); \ + struct debugfs_fsdata *fsd = dentry->d_fsdata; \ const struct file_operations *real_fops; \ ret_type r; \ \ + if (!(fsd->methods & bit)) \ + return ret; \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ @@ -338,17 +366,18 @@ static ret_type full_proxy_ ## name(proto) \ return r; \ } -#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args) \ +#define FULL_PROXY_FUNC_BOTH(name, ret_type, filp, proto, args, bit, ret) \ static ret_type full_proxy_ ## name(proto) \ { \ struct dentry *dentry = F_DENTRY(filp); \ - struct debugfs_fsdata *fsd; \ + struct debugfs_fsdata *fsd = dentry->d_fsdata; \ ret_type r; \ \ + if (!(fsd->methods & bit)) \ + return ret; \ r = debugfs_file_get(dentry); \ if (unlikely(r)) \ return r; \ - fsd = dentry->d_fsdata; \ if (fsd->real_fops) \ r = fsd->real_fops->name(args); \ else \ @@ -359,29 +388,32 @@ static ret_type full_proxy_ ## name(proto) \ FULL_PROXY_FUNC_BOTH(llseek, loff_t, filp, PROTO(struct file *filp, loff_t offset, int whence), - ARGS(filp, offset, whence)); + ARGS(filp, offset, whence), HAS_LSEEK, -ESPIPE); FULL_PROXY_FUNC_BOTH(read, ssize_t, filp, PROTO(struct file *filp, char __user *buf, size_t size, loff_t *ppos), - ARGS(filp, buf, size, ppos)); + ARGS(filp, buf, size, ppos), HAS_READ, -EINVAL); FULL_PROXY_FUNC_BOTH(write, ssize_t, filp, PROTO(struct file *filp, const char __user *buf, size_t size, loff_t *ppos), - ARGS(filp, buf, size, ppos)); + ARGS(filp, buf, size, ppos), HAS_WRITE, -EINVAL); FULL_PROXY_FUNC(unlocked_ioctl, long, filp, PROTO(struct file *filp, unsigned int cmd, unsigned long arg), - ARGS(filp, cmd, arg)); + ARGS(filp, cmd, arg), HAS_IOCTL, -ENOTTY); static __poll_t full_proxy_poll(struct file *filp, struct poll_table_struct *wait) { struct dentry *dentry = F_DENTRY(filp); + struct debugfs_fsdata *fsd = dentry->d_fsdata; __poll_t r = 0; const struct file_operations *real_fops; + if (!(fsd->methods & HAS_POLL)) + return DEFAULT_POLLMASK; if (debugfs_file_get(dentry)) return EPOLLHUP; @@ -393,9 +425,7 @@ static __poll_t full_proxy_poll(struct file *filp, static int full_proxy_release(struct inode *inode, struct file *filp) { - const struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops = debugfs_real_fops(filp); - const struct file_operations *proxy_fops = filp->f_op; int r = 0; /* @@ -404,49 +434,21 @@ static int full_proxy_release(struct inode *inode, struct file *filp) * not to leak any resources. Releasers must not assume that * ->i_private is still being meaningful here. 
*/ - if (real_fops && real_fops->release) + if (real_fops->release) r = real_fops->release(inode, filp); - replace_fops(filp, d_inode(dentry)->i_fop); - kfree(proxy_fops); fops_put(real_fops); return r; } -static void __full_proxy_fops_init(struct file_operations *proxy_fops, - struct debugfs_fsdata *fsd) -{ - proxy_fops->release = full_proxy_release; - - if ((fsd->real_fops && fsd->real_fops->llseek) || - (fsd->short_fops && fsd->short_fops->llseek)) - proxy_fops->llseek = full_proxy_llseek; - - if ((fsd->real_fops && fsd->real_fops->read) || - (fsd->short_fops && fsd->short_fops->read)) - proxy_fops->read = full_proxy_read; - - if ((fsd->real_fops && fsd->real_fops->write) || - (fsd->short_fops && fsd->short_fops->write)) - proxy_fops->write = full_proxy_write; - - if (fsd->real_fops && fsd->real_fops->poll) - proxy_fops->poll = full_proxy_poll; - - if (fsd->real_fops && fsd->real_fops->unlocked_ioctl) - proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl; -} - -static int full_proxy_open(struct inode *inode, struct file *filp, - enum dbgfs_get_mode mode) +static int full_proxy_open_regular(struct inode *inode, struct file *filp) { struct dentry *dentry = F_DENTRY(filp); const struct file_operations *real_fops; - struct file_operations *proxy_fops = NULL; struct debugfs_fsdata *fsd; int r; - r = __debugfs_file_get(dentry, mode); + r = __debugfs_file_get(dentry, DBGFS_GET_REGULAR); if (r) return r == -EIO ? -ENOENT : r; @@ -456,7 +458,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp, if (r) goto out; - if (real_fops && !fops_get(real_fops)) { + if (!fops_get(real_fops)) { #ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) { @@ -472,55 +474,52 @@ static int full_proxy_open(struct inode *inode, struct file *filp, goto out; } - proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL); - if (!proxy_fops) { - r = -ENOMEM; - goto free_proxy; - } - __full_proxy_fops_init(proxy_fops, fsd); - replace_fops(filp, proxy_fops); - - if (!real_fops || real_fops->open) { - if (real_fops) - r = real_fops->open(inode, filp); - else - r = simple_open(inode, filp); + if (real_fops->open) { + r = real_fops->open(inode, filp); if (r) { - replace_fops(filp, d_inode(dentry)->i_fop); - goto free_proxy; - } else if (filp->f_op != proxy_fops) { + fops_put(real_fops); + } else if (filp->f_op != &debugfs_full_proxy_file_operations) { /* No protection against file removal anymore. */ WARN(1, "debugfs file owner replaced proxy fops: %pd", dentry); - goto free_proxy; + fops_put(real_fops); } } - - goto out; -free_proxy: - kfree(proxy_fops); - fops_put(real_fops); out: debugfs_file_put(dentry); return r; } -static int full_proxy_open_regular(struct inode *inode, struct file *filp) -{ - return full_proxy_open(inode, filp, DBGFS_GET_REGULAR); -} - const struct file_operations debugfs_full_proxy_file_operations = { .open = full_proxy_open_regular, + .release = full_proxy_release, + .llseek = full_proxy_llseek, + .read = full_proxy_read, + .write = full_proxy_write, + .poll = full_proxy_poll, + .unlocked_ioctl = full_proxy_unlocked_ioctl }; static int full_proxy_open_short(struct inode *inode, struct file *filp) { - return full_proxy_open(inode, filp, DBGFS_GET_SHORT); + struct dentry *dentry = F_DENTRY(filp); + int r; + + r = __debugfs_file_get(dentry, DBGFS_GET_SHORT); + if (r) + return r == -EIO ? 
-ENOENT : r; + r = debugfs_locked_down(inode, filp, NULL); + if (!r) + r = simple_open(inode, filp); + debugfs_file_put(dentry); + return r; } const struct file_operations debugfs_full_short_proxy_file_operations = { .open = full_proxy_open_short, + .llseek = full_proxy_llseek, + .read = full_proxy_read, + .write = full_proxy_write, }; ssize_t debugfs_attr_read(struct file *file, char __user *buf, diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index e752009de929..75715d8877ee 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -208,16 +208,34 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) return 0; } +static struct kmem_cache *debugfs_inode_cachep __ro_after_init; + +static void init_once(void *foo) +{ + struct debugfs_inode_info *info = foo; + inode_init_once(&info->vfs_inode); +} + +static struct inode *debugfs_alloc_inode(struct super_block *sb) +{ + struct debugfs_inode_info *info; + info = alloc_inode_sb(sb, debugfs_inode_cachep, GFP_KERNEL); + if (!info) + return NULL; + return &info->vfs_inode; +} + static void debugfs_free_inode(struct inode *inode) { if (S_ISLNK(inode->i_mode)) kfree(inode->i_link); - free_inode_nonrcu(inode); + kmem_cache_free(debugfs_inode_cachep, DEBUGFS_I(inode)); } static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .show_options = debugfs_show_options, + .alloc_inode = debugfs_alloc_inode, .free_inode = debugfs_free_inode, }; @@ -225,23 +243,18 @@ static void debugfs_release_dentry(struct dentry *dentry) { struct debugfs_fsdata *fsd = dentry->d_fsdata; - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) - return; - - /* check it wasn't a dir (no fsdata) or automount (no real_fops) */ - if (fsd && (fsd->real_fops || fsd->short_fops)) { + if (fsd) { WARN_ON(!list_empty(&fsd->cancellations)); mutex_destroy(&fsd->cancellations_mtx); } - kfree(fsd); } static struct vfsmount *debugfs_automount(struct path *path) { - struct debugfs_fsdata *fsd = path->dentry->d_fsdata; + struct inode *inode = path->dentry->d_inode; - return fsd->automount(path->dentry, d_inode(path->dentry)->i_private); + return DEBUGFS_I(inode)->automount(path->dentry, inode->i_private); } static const struct dentry_operations debugfs_dops = { @@ -411,6 +424,7 @@ static struct dentry *end_creating(struct dentry *dentry) static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct file_operations *proxy_fops, const void *real_fops) { @@ -441,9 +455,11 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, inode->i_private = data; inode->i_op = &debugfs_file_inode_operations; + if (!real_fops) + proxy_fops = &debugfs_noop_file_operations; inode->i_fop = proxy_fops; - dentry->d_fsdata = (void *)((unsigned long)real_fops | - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT); + DEBUGFS_I(inode)->raw = real_fops; + DEBUGFS_I(inode)->aux = aux; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); @@ -452,30 +468,22 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, struct dentry *debugfs_create_file_full(const char *name, umode_t mode, struct dentry *parent, void *data, + const void *aux, const struct file_operations *fops) { - if (WARN_ON((unsigned long)fops & - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) - return ERR_PTR(-EINVAL); - - return __debugfs_create_file(name, mode, parent, data, - fops ? 
&debugfs_full_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, aux, + &debugfs_full_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_full); struct dentry *debugfs_create_file_short(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct debugfs_short_fops *fops) + struct dentry *parent, void *data, + const void *aux, + const struct debugfs_short_fops *fops) { - if (WARN_ON((unsigned long)fops & - DEBUGFS_FSDATA_IS_REAL_FOPS_BIT)) - return ERR_PTR(-EINVAL); - - return __debugfs_create_file(name, mode, parent, data, - fops ? &debugfs_full_short_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, aux, + &debugfs_full_short_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_short); @@ -512,9 +520,8 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, const struct file_operations *fops) { - return __debugfs_create_file(name, mode, parent, data, - fops ? &debugfs_open_proxy_file_operations : - &debugfs_noop_file_operations, + return __debugfs_create_file(name, mode, parent, data, NULL, + &debugfs_open_proxy_file_operations, fops); } EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); @@ -624,23 +631,13 @@ struct dentry *debugfs_create_automount(const char *name, void *data) { struct dentry *dentry = start_creating(name, parent); - struct debugfs_fsdata *fsd; struct inode *inode; if (IS_ERR(dentry)) return dentry; - fsd = kzalloc(sizeof(*fsd), GFP_KERNEL); - if (!fsd) { - failed_creating(dentry); - return ERR_PTR(-ENOMEM); - } - - fsd->automount = f; - if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { failed_creating(dentry); - kfree(fsd); return ERR_PTR(-EPERM); } @@ -648,14 +645,13 @@ struct dentry *debugfs_create_automount(const char *name, if (unlikely(!inode)) { pr_err("out of free dentries, can not create automount '%s'\n", name); - kfree(fsd); return failed_creating(dentry); } make_empty_dir_inode(inode); inode->i_flags |= S_AUTOMOUNT; inode->i_private = data; - dentry->d_fsdata = fsd; + DEBUGFS_I(inode)->automount = f; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); @@ -730,7 +726,7 @@ static void __debugfs_file_removed(struct dentry *dentry) */ smp_mb(); fsd = READ_ONCE(dentry->d_fsdata); - if ((unsigned long)fsd & DEBUGFS_FSDATA_IS_REAL_FOPS_BIT) + if (!fsd) return; /* if this was the last reference, we're done */ @@ -834,76 +830,70 @@ void debugfs_lookup_and_remove(const char *name, struct dentry *parent) EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove); /** - * debugfs_rename - rename a file/directory in the debugfs filesystem - * @old_dir: a pointer to the parent dentry for the renamed object. This - * should be a directory dentry. - * @old_dentry: dentry of an object to be renamed. - * @new_dir: a pointer to the parent dentry where the object should be - * moved. This should be a directory dentry. - * @new_name: a pointer to a string containing the target name. + * debugfs_change_name - rename a file/directory in the debugfs filesystem + * @dentry: dentry of an object to be renamed. + * @fmt: format for new name * * This function renames a file/directory in debugfs. The target must not * exist for rename to succeed. * - * This function will return a pointer to old_dentry (which is updated to - * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR) - * will be returned. 
+ * This function will return 0 on success and -E... on failure. * * If debugfs is not enabled in the kernel, the value -%ENODEV will be * returned. */ -struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, - struct dentry *new_dir, const char *new_name) +int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...) { - int error; - struct dentry *dentry = NULL, *trap; + int error = 0; + const char *new_name; struct name_snapshot old_name; + struct dentry *parent, *target; + struct inode *dir; + va_list ap; - if (IS_ERR(old_dir)) - return old_dir; - if (IS_ERR(new_dir)) - return new_dir; - if (IS_ERR_OR_NULL(old_dentry)) - return old_dentry; - - trap = lock_rename(new_dir, old_dir); - /* Source or destination directories don't exist? */ - if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) - goto exit; - /* Source does not exist, cyclic rename, or mountpoint? */ - if (d_really_is_negative(old_dentry) || old_dentry == trap || - d_mountpoint(old_dentry)) - goto exit; - dentry = lookup_one_len(new_name, new_dir, strlen(new_name)); - /* Lookup failed, cyclic rename or target exists? */ - if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry)) - goto exit; - - take_dentry_name_snapshot(&old_name, old_dentry); - - error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry, - d_inode(new_dir), dentry, 0); - if (error) { - release_dentry_name_snapshot(&old_name); - goto exit; + if (IS_ERR_OR_NULL(dentry)) + return 0; + + va_start(ap, fmt); + new_name = kvasprintf_const(GFP_KERNEL, fmt, ap); + va_end(ap); + if (!new_name) + return -ENOMEM; + + parent = dget_parent(dentry); + dir = d_inode(parent); + inode_lock(dir); + + take_dentry_name_snapshot(&old_name, dentry); + + if (WARN_ON_ONCE(dentry->d_parent != parent)) { + error = -EINVAL; + goto out; } - d_move(old_dentry, dentry); - fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name, - d_is_dir(old_dentry), - NULL, old_dentry); + if (strcmp(old_name.name.name, new_name) == 0) + goto out; + target = lookup_one_len(new_name, parent, strlen(new_name)); + if (IS_ERR(target)) { + error = PTR_ERR(target); + goto out; + } + if (d_really_is_positive(target)) { + dput(target); + error = -EINVAL; + goto out; + } + simple_rename_timestamp(dir, dentry, dir, target); + d_move(dentry, target); + dput(target); + fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry); +out: release_dentry_name_snapshot(&old_name); - unlock_rename(new_dir, old_dir); - dput(dentry); - return old_dentry; -exit: - if (dentry && !IS_ERR(dentry)) - dput(dentry); - unlock_rename(new_dir, old_dir); - if (IS_ERR(dentry)) - return dentry; - return ERR_PTR(-EINVAL); + inode_unlock(dir); + dput(parent); + kfree_const(new_name); + return error; } -EXPORT_SYMBOL_GPL(debugfs_rename); +EXPORT_SYMBOL_GPL(debugfs_change_name); /** * debugfs_initialized - Tells whether debugfs has been registered @@ -939,12 +929,22 @@ static int __init debugfs_init(void) if (retval) return retval; - retval = register_filesystem(&debug_fs_type); - if (retval) + debugfs_inode_cachep = kmem_cache_create("debugfs_inode_cache", + sizeof(struct debugfs_inode_info), 0, + SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT, + init_once); + if (debugfs_inode_cachep == NULL) { sysfs_remove_mount_point(kernel_kobj, "debug"); - else - debugfs_registered = true; + return -ENOMEM; + } - return retval; + retval = register_filesystem(&debug_fs_type); + if (retval) { // Really not going to happen + sysfs_remove_mount_point(kernel_kobj, 
"debug"); + kmem_cache_destroy(debugfs_inode_cachep); + return retval; + } + debugfs_registered = true; + return 0; } core_initcall(debugfs_init); diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h index bbae4a228ef4..93483fe84425 100644 --- a/fs/debugfs/internal.h +++ b/fs/debugfs/internal.h @@ -11,6 +11,22 @@ struct file_operations; +struct debugfs_inode_info { + struct inode vfs_inode; + union { + const void *raw; + const struct file_operations *real_fops; + const struct debugfs_short_fops *short_fops; + debugfs_automount_t automount; + }; + const void *aux; +}; + +static inline struct debugfs_inode_info *DEBUGFS_I(struct inode *inode) +{ + return container_of(inode, struct debugfs_inode_info, vfs_inode); +} + /* declared over in file.c */ extern const struct file_operations debugfs_noop_file_operations; extern const struct file_operations debugfs_open_proxy_file_operations; @@ -20,29 +36,25 @@ extern const struct file_operations debugfs_full_short_proxy_file_operations; struct debugfs_fsdata { const struct file_operations *real_fops; const struct debugfs_short_fops *short_fops; - union { - /* automount_fn is used when real_fops is NULL */ - debugfs_automount_t automount; - struct { - refcount_t active_users; - struct completion active_users_drained; - - /* protect cancellations */ - struct mutex cancellations_mtx; - struct list_head cancellations; - }; + struct { + refcount_t active_users; + struct completion active_users_drained; + + /* protect cancellations */ + struct mutex cancellations_mtx; + struct list_head cancellations; + unsigned int methods; }; }; -/* - * A dentry's ->d_fsdata either points to the real fops or to a - * dynamically allocated debugfs_fsdata instance. - * In order to distinguish between these two cases, a real fops - * pointer gets its lowest bit set. - */ -#define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0) +enum { + HAS_READ = 1, + HAS_WRITE = 2, + HAS_LSEEK = 4, + HAS_POLL = 8, + HAS_IOCTL = 16 +}; -/* Access BITS */ #define DEBUGFS_ALLOW_API BIT(0) #define DEBUGFS_ALLOW_MOUNT BIT(1) diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index b20e565b9c5e..1096ff8562fa 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -45,7 +45,7 @@ static int pty_limit_min; static int pty_limit_max = INT_MAX; static atomic_t pty_count = ATOMIC_INIT(0); -static struct ctl_table pty_table[] = { +static const struct ctl_table pty_table[] = { { .procname = "max", .maxlen = sizeof(int), diff --git a/fs/dlm/config.c b/fs/dlm/config.c index b2f21aa00719..cf9ba6fd7a28 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -935,7 +935,7 @@ int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked) mutex_unlock(&clusters_root.subsys.su_mutex); } if (!cm) - return -EEXIST; + return -ENOENT; *seq = cm->seq; put_comm(cm); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index fc1d710166e9..c8ff88f1cdcf 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -824,9 +824,12 @@ static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, r->res_first_lkid = 0; } - /* A dir record will not be on the scan list. */ - if (r->res_dir_nodeid != our_nodeid) - del_scan(ls, r); + /* we always deactivate scan timer for the rsb, when + * we move it out of the inactive state as rsb state + * can be changed and scan timers are only for inactive + * rsbs. 
+ */ + del_scan(ls, r); list_move(&r->res_slow_list, &ls->ls_slow_active); rsb_clear_flag(r, RSB_INACTIVE); kref_init(&r->res_ref); /* ref is now used in active state */ @@ -989,10 +992,10 @@ static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, r->res_nodeid = 0; } + del_scan(ls, r); list_move(&r->res_slow_list, &ls->ls_slow_active); rsb_clear_flag(r, RSB_INACTIVE); kref_init(&r->res_ref); - del_scan(ls, r); write_unlock_bh(&ls->ls_rsbtbl_lock); goto out; @@ -1337,9 +1340,13 @@ static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *na __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags, r_nodeid, result); - /* A dir record rsb should never be on scan list. */ - /* Try to fix this with del_scan? */ - WARN_ON(!list_empty(&r->res_scan_list)); + /* A dir record rsb should never be on scan list. + * Except when we are the dir and master node. + * This function should only be called by the dir + * node. + */ + WARN_ON(!list_empty(&r->res_scan_list) && + r->res_master_nodeid != our_nodeid); write_unlock_bh(&ls->ls_rsbtbl_lock); @@ -1430,16 +1437,23 @@ static void deactivate_rsb(struct kref *kref) list_move(&r->res_slow_list, &ls->ls_slow_inactive); /* - * When the rsb becomes unused: - * - If it's not a dir record for a remote master rsb, - * then it is put on the scan list to be freed. - * - If it's a dir record for a remote master rsb, - * then it is kept in the inactive state until - * receive_remove() from the master node. + * When the rsb becomes unused, there are two possibilities: + * 1. Leave the inactive rsb in place (don't remove it). + * 2. Add it to the scan list to be removed. + * + * 1 is done when the rsb is acting as the dir record + * for a remotely mastered rsb. The rsb must be left + * in place as an inactive rsb to act as the dir record. + * + * 2 is done when a) the rsb is not the master and not the + * dir record, b) when the rsb is both the master and the + * dir record, c) when the rsb is master but not dir record. + * + * (If no directory is used, the rsb can always be removed.) */ - if (!dlm_no_directory(ls) && - (r->res_master_nodeid != our_nodeid) && - (dlm_dir_nodeid(r) != our_nodeid)) + if (dlm_no_directory(ls) || + (r->res_master_nodeid == our_nodeid || + dlm_dir_nodeid(r) != our_nodeid)) add_scan(ls, r); if (r->res_lvbptr) { diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index df40c3fd1070..d28141829c05 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -462,7 +462,8 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con, int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr) { struct connection *con; - bool ret, idx; + bool ret; + int idx; idx = srcu_read_lock(&connections_srcu); con = nodeid2con(nodeid, GFP_NOFS); diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index acaa0825e9bb..1dfd5b81d831 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c @@ -17,7 +17,9 @@ /** * ecryptfs_d_revalidate - revalidate an ecryptfs dentry - * @dentry: The ecryptfs dentry + * @dir: inode of expected parent + * @name: expected name + * @dentry: dentry to revalidate * @flags: lookup flags * * Called when the VFS needs to revalidate a dentry. This @@ -28,7 +30,8 @@ * Returns 1 if valid, 0 otherwise. 
* */ -static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int ecryptfs_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); int rc = 1; @@ -36,8 +39,15 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) if (flags & LOOKUP_RCU) return -ECHILD; - if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) - rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); + if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) { + struct inode *lower_dir = ecryptfs_inode_to_lower(dir); + struct name_snapshot n; + + take_dentry_name_snapshot(&n, lower_dentry); + rc = lower_dentry->d_op->d_revalidate(lower_dir, &n.name, + lower_dentry, flags); + release_dentry_name_snapshot(&n); + } if (d_really_is_positive(dentry)) { struct inode *inode = d_inode(dentry); diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c index 23c51d62f902..cb1b6d0c3454 100644 --- a/fs/efivarfs/file.c +++ b/fs/efivarfs/file.c @@ -36,28 +36,41 @@ static ssize_t efivarfs_file_write(struct file *file, if (IS_ERR(data)) return PTR_ERR(data); + inode_lock(inode); + if (var->removed) { + /* + * file got removed; don't allow a set. Caused by an + * unsuccessful create or successful delete write + * racing with us. + */ + bytes = -EIO; + goto out; + } + bytes = efivar_entry_set_get_size(var, attributes, &datasize, data, &set); - if (!set && bytes) { + if (!set) { if (bytes == -ENOENT) bytes = -EIO; goto out; } if (bytes == -ENOENT) { - drop_nlink(inode); - d_delete(file->f_path.dentry); - dput(file->f_path.dentry); + /* + * zero size signals to release that the write deleted + * the variable + */ + i_size_write(inode, 0); } else { - inode_lock(inode); i_size_write(inode, datasize + sizeof(attributes)); inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); - inode_unlock(inode); } bytes = count; out: + inode_unlock(inode); + kfree(data); return bytes; @@ -106,8 +119,36 @@ out_free: return size; } +static int efivarfs_file_release(struct inode *inode, struct file *file) +{ + struct efivar_entry *var = inode->i_private; + + inode_lock(inode); + var->removed = (--var->open_count == 0 && i_size_read(inode) == 0); + inode_unlock(inode); + + if (var->removed) + simple_recursive_removal(file->f_path.dentry, NULL); + + return 0; +} + +static int efivarfs_file_open(struct inode *inode, struct file *file) +{ + struct efivar_entry *entry = inode->i_private; + + file->private_data = entry; + + inode_lock(inode); + entry->open_count++; + inode_unlock(inode); + + return 0; +} + const struct file_operations efivarfs_file_operations = { - .open = simple_open, - .read = efivarfs_file_read, - .write = efivarfs_file_write, + .open = efivarfs_file_open, + .read = efivarfs_file_read, + .write = efivarfs_file_write, + .release = efivarfs_file_release, }; diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index ec23da8405ff..98a7299a9ee9 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -77,39 +77,34 @@ static bool efivarfs_valid_name(const char *str, int len) static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { - struct efivarfs_fs_info *info = dir->i_sb->s_fs_info; struct inode *inode = NULL; struct efivar_entry *var; int namelen, i = 0, err = 0; bool is_removable = false; + efi_guid_t vendor; if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len)) return -EINVAL; - var = kzalloc(sizeof(struct 
efivar_entry), GFP_KERNEL); - if (!var) - return -ENOMEM; - /* length of the variable name itself: remove GUID and separator */ namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; - err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid); + err = guid_parse(dentry->d_name.name + namelen + 1, &vendor); if (err) - goto out; - if (guid_equal(&var->var.VendorGuid, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) { - err = -EPERM; - goto out; - } + return err; + if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) + return -EPERM; - if (efivar_variable_is_removable(var->var.VendorGuid, + if (efivar_variable_is_removable(vendor, dentry->d_name.name, namelen)) is_removable = true; inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable); - if (!inode) { - err = -ENOMEM; - goto out; - } + if (!inode) + return -ENOMEM; + var = efivar_entry(inode); + + var->var.VendorGuid = vendor; for (i = 0; i < namelen; i++) var->var.VariableName[i] = dentry->d_name.name[i]; @@ -117,21 +112,11 @@ static int efivarfs_create(struct mnt_idmap *idmap, struct inode *dir, var->var.VariableName[i] = '\0'; inode->i_private = var; - kmemleak_ignore(var); - - err = efivar_entry_add(var, &info->efivarfs_list); - if (err) - goto out; d_instantiate(dentry, inode); dget(dentry); -out: - if (err) { - kfree(var); - if (inode) - iput(inode); - } - return err; + + return 0; } static int efivarfs_unlink(struct inode *dir, struct dentry *dentry) @@ -187,7 +172,24 @@ efivarfs_fileattr_set(struct mnt_idmap *idmap, return 0; } +/* copy of simple_setattr except that it doesn't do i_size updates */ +static int efivarfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, + struct iattr *iattr) +{ + struct inode *inode = d_inode(dentry); + int error; + + error = setattr_prepare(idmap, dentry, iattr); + if (error) + return error; + + setattr_copy(idmap, inode, iattr); + mark_inode_dirty(inode); + return 0; +} + static const struct inode_operations efivarfs_file_inode_operations = { .fileattr_get = efivarfs_fileattr_get, .fileattr_set = efivarfs_fileattr_set, + .setattr = efivarfs_setattr, }; diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h index 74f0602a9e01..ac6a1dd0a6a5 100644 --- a/fs/efivarfs/internal.h +++ b/fs/efivarfs/internal.h @@ -6,7 +6,6 @@ #ifndef EFIVAR_FS_INTERNAL_H #define EFIVAR_FS_INTERNAL_H -#include <linux/list.h> #include <linux/efi.h> struct efivarfs_mount_opts { @@ -16,30 +15,31 @@ struct efivarfs_mount_opts { struct efivarfs_fs_info { struct efivarfs_mount_opts mount_opts; - struct list_head efivarfs_list; struct super_block *sb; struct notifier_block nb; + struct notifier_block pm_nb; }; struct efi_variable { efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; efi_guid_t VendorGuid; - __u32 Attributes; }; struct efivar_entry { struct efi_variable var; - struct list_head list; - struct kobject kobj; + struct inode vfs_inode; + unsigned long open_count; + bool removed; }; -int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *, - struct list_head *), - void *data, struct list_head *head); +static inline struct efivar_entry *efivar_entry(struct inode *inode) +{ + return container_of(inode, struct efivar_entry, vfs_inode); +} + +int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), + void *data, bool duplicate_check); -int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); -void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head); -void efivar_entry_remove(struct efivar_entry 
*entry); int efivar_entry_delete(struct efivar_entry *entry); int efivar_entry_size(struct efivar_entry *entry, unsigned long *size); @@ -50,13 +50,14 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, unsigned long *size, void *data, bool *set); -int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data); bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, unsigned long data_size); bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, size_t len); +char *efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor); +bool efivarfs_variable_is_present(efi_char16_t *variable_name, + efi_guid_t *vendor, void *data); extern const struct file_operations efivarfs_file_operations; extern const struct inode_operations efivarfs_dir_inode_operations; diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index beba15673be8..09fcf731e65d 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -13,6 +13,7 @@ #include <linux/pagemap.h> #include <linux/ucs2_string.h> #include <linux/slab.h> +#include <linux/suspend.h> #include <linux/magic.h> #include <linux/statfs.h> #include <linux/notifier.h> @@ -39,9 +40,24 @@ static int efivarfs_ops_notifier(struct notifier_block *nb, unsigned long event, return NOTIFY_OK; } -static void efivarfs_evict_inode(struct inode *inode) +static struct inode *efivarfs_alloc_inode(struct super_block *sb) { - clear_inode(inode); + struct efivar_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL); + + if (!entry) + return NULL; + + inode_init_once(&entry->vfs_inode); + entry->removed = false; + + return &entry->vfs_inode; +} + +static void efivarfs_free_inode(struct inode *inode) +{ + struct efivar_entry *entry = efivar_entry(inode); + + kfree(entry); } static int efivarfs_show_options(struct seq_file *m, struct dentry *root) @@ -106,7 +122,8 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf) static const struct super_operations efivarfs_ops = { .statfs = efivarfs_statfs, .drop_inode = generic_delete_inode, - .evict_inode = efivarfs_evict_inode, + .alloc_inode = efivarfs_alloc_inode, + .free_inode = efivarfs_free_inode, .show_options = efivarfs_show_options, }; @@ -181,56 +198,63 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name) return ERR_PTR(-ENOMEM); } -static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, - unsigned long name_size, void *data, - struct list_head *list) +bool efivarfs_variable_is_present(efi_char16_t *variable_name, + efi_guid_t *vendor, void *data) +{ + char *name = efivar_get_utf8name(variable_name, vendor); + struct super_block *sb = data; + struct dentry *dentry; + struct qstr qstr; + + if (!name) + /* + * If the allocation failed there'll already be an + * error in the log (and likely a huge and growing + * number of them since the system will be under + * extreme memory pressure), so simply assume + * collision for safety but don't add to the log + * flood. 
+ */ + return true; + + qstr.name = name; + qstr.len = strlen(name); + dentry = d_hash_and_lookup(sb->s_root, &qstr); + kfree(name); + if (!IS_ERR_OR_NULL(dentry)) + dput(dentry); + + return dentry != NULL; +} + +static int efivarfs_create_dentry(struct super_block *sb, efi_char16_t *name16, + unsigned long name_size, efi_guid_t vendor, + char *name) { - struct super_block *sb = (struct super_block *)data; struct efivar_entry *entry; - struct inode *inode = NULL; + struct inode *inode; struct dentry *dentry, *root = sb->s_root; unsigned long size = 0; - char *name; int len; int err = -ENOMEM; bool is_removable = false; - if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) - return 0; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return err; - - memcpy(entry->var.VariableName, name16, name_size); - memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); - - len = ucs2_utf8size(entry->var.VariableName); + /* length of the variable name itself: remove GUID and separator */ + len = strlen(name) - EFI_VARIABLE_GUID_LEN - 1; - /* name, plus '-', plus GUID, plus NUL*/ - name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); - if (!name) - goto fail; - - ucs2_as_utf8(name, entry->var.VariableName, len); - - if (efivar_variable_is_removable(entry->var.VendorGuid, name, len)) + if (efivar_variable_is_removable(vendor, name, len)) is_removable = true; - name[len] = '-'; - - efi_guid_to_str(&entry->var.VendorGuid, name + len + 1); - - name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; - - /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. */ - strreplace(name, '/', '!'); - inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0, is_removable); if (!inode) goto fail_name; + entry = efivar_entry(inode); + + memcpy(entry->var.VariableName, name16, name_size); + memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); + dentry = efivarfs_alloc_dentry(root, name); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); @@ -238,14 +262,13 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, } __efivar_entry_get(entry, NULL, &size, NULL); - __efivar_entry_add(entry, list); /* copied by the above to local storage in the dentry. 
*/ kfree(name); inode_lock(inode); inode->i_private = entry; - i_size_write(inode, size + sizeof(entry->var.Attributes)); + i_size_write(inode, size + sizeof(__u32)); /* attributes + data */ inode_unlock(inode); d_add(dentry, inode); @@ -255,16 +278,24 @@ fail_inode: iput(inode); fail_name: kfree(name); -fail: - kfree(entry); + return err; } -static int efivarfs_destroy(struct efivar_entry *entry, void *data) +static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, + unsigned long name_size, void *data) { - efivar_entry_remove(entry); - kfree(entry); - return 0; + struct super_block *sb = (struct super_block *)data; + char *name; + + if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) + return 0; + + name = efivar_get_utf8name(name16, &vendor); + if (!name) + return -ENOMEM; + + return efivarfs_create_dentry(sb, name16, name_size, vendor, name); } enum { @@ -336,7 +367,7 @@ static int efivarfs_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; - return efivar_init(efivarfs_callback, sb, &sfi->efivarfs_list); + return efivar_init(efivarfs_callback, sb, true); } static int efivarfs_get_tree(struct fs_context *fc) @@ -360,6 +391,148 @@ static const struct fs_context_operations efivarfs_context_ops = { .reconfigure = efivarfs_reconfigure, }; +struct efivarfs_ctx { + struct dir_context ctx; + struct super_block *sb; + struct dentry *dentry; +}; + +static bool efivarfs_actor(struct dir_context *ctx, const char *name, int len, + loff_t offset, u64 ino, unsigned mode) +{ + unsigned long size; + struct efivarfs_ctx *ectx = container_of(ctx, struct efivarfs_ctx, ctx); + struct qstr qstr = { .name = name, .len = len }; + struct dentry *dentry = d_hash_and_lookup(ectx->sb->s_root, &qstr); + struct inode *inode; + struct efivar_entry *entry; + int err; + + if (IS_ERR_OR_NULL(dentry)) + return true; + + inode = d_inode(dentry); + entry = efivar_entry(inode); + + err = efivar_entry_size(entry, &size); + size += sizeof(__u32); /* attributes */ + if (err) + size = 0; + + inode_lock(inode); + i_size_write(inode, size); + inode_unlock(inode); + + if (!size) { + ectx->dentry = dentry; + return false; + } + + dput(dentry); + + return true; +} + +static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor, + unsigned long name_size, void *data) +{ + char *name; + struct super_block *sb = data; + struct dentry *dentry; + struct qstr qstr; + int err; + + if (guid_equal(&vendor, &LINUX_EFI_RANDOM_SEED_TABLE_GUID)) + return 0; + + name = efivar_get_utf8name(name16, &vendor); + if (!name) + return -ENOMEM; + + qstr.name = name; + qstr.len = strlen(name); + dentry = d_hash_and_lookup(sb->s_root, &qstr); + if (IS_ERR(dentry)) { + err = PTR_ERR(dentry); + goto out; + } + + if (!dentry) { + /* found missing entry */ + pr_info("efivarfs: creating variable %s\n", name); + return efivarfs_create_dentry(sb, name16, name_size, vendor, name); + } + + dput(dentry); + err = 0; + + out: + kfree(name); + + return err; +} + +static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info, + pm_nb); + struct path path = { .mnt = NULL, .dentry = sfi->sb->s_root, }; + struct efivarfs_ctx ectx = { + .ctx = { + .actor = efivarfs_actor, + }, + .sb = sfi->sb, + }; + struct file *file; + static bool rescan_done = true; + + if (action == PM_HIBERNATION_PREPARE) { + rescan_done = false; + return NOTIFY_OK; + } else if (action != PM_POST_HIBERNATION) { + return NOTIFY_DONE; + 
} + + if (rescan_done) + return NOTIFY_DONE; + + pr_info("efivarfs: resyncing variable state\n"); + + /* O_NOATIME is required to prevent oops on NULL mnt */ + file = kernel_file_open(&path, O_RDONLY | O_DIRECTORY | O_NOATIME, + current_cred()); + if (IS_ERR(file)) + return NOTIFY_DONE; + + rescan_done = true; + + /* + * First loop over the directory and verify each entry exists, + * removing it if it doesn't + */ + file->f_pos = 2; /* skip . and .. */ + do { + ectx.dentry = NULL; + iterate_dir(file, &ectx.ctx); + if (ectx.dentry) { + pr_info("efivarfs: removing variable %pd\n", + ectx.dentry); + simple_recursive_removal(ectx.dentry, NULL); + dput(ectx.dentry); + } + } while (ectx.dentry); + fput(file); + + /* + * then loop over variables, creating them if there's no matching + * dentry + */ + efivar_init(efivarfs_check_missing, sfi->sb, false); + + return NOTIFY_OK; +} + static int efivarfs_init_fs_context(struct fs_context *fc) { struct efivarfs_fs_info *sfi; @@ -371,13 +544,16 @@ static int efivarfs_init_fs_context(struct fs_context *fc) if (!sfi) return -ENOMEM; - INIT_LIST_HEAD(&sfi->efivarfs_list); - sfi->mount_opts.uid = GLOBAL_ROOT_UID; sfi->mount_opts.gid = GLOBAL_ROOT_GID; fc->s_fs_info = sfi; fc->ops = &efivarfs_context_ops; + + sfi->pm_nb.notifier_call = efivarfs_pm_notify; + sfi->pm_nb.priority = 0; + register_pm_notifier(&sfi->pm_nb); + return 0; } @@ -387,9 +563,8 @@ static void efivarfs_kill_sb(struct super_block *sb) blocking_notifier_chain_unregister(&efivar_ops_nh, &sfi->nb); kill_litter_super(sb); + unregister_pm_notifier(&sfi->pm_nb); - /* Remove all entries and destroy */ - efivar_entry_iter(efivarfs_destroy, &sfi->efivarfs_list, NULL); kfree(sfi); } diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c index f7d43c847ee9..6edc10958ecf 100644 --- a/fs/efivarfs/vars.c +++ b/fs/efivarfs/vars.c @@ -225,6 +225,31 @@ variable_matches(const char *var_name, size_t len, const char *match_name, } } +char * +efivar_get_utf8name(const efi_char16_t *name16, efi_guid_t *vendor) +{ + int len = ucs2_utf8size(name16); + char *name; + + /* name, plus '-', plus GUID, plus NUL*/ + name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL); + if (!name) + return NULL; + + ucs2_as_utf8(name, name16, len); + + name[len] = '-'; + + efi_guid_to_str(vendor, name + len + 1); + + name[len + EFI_VARIABLE_GUID_LEN+1] = '\0'; + + /* replace invalid slashes like kobject_set_name_vargs does for /sys/firmware/efi/vars. 
*/ + strreplace(name, '/', '!'); + + return name; +} + bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, unsigned long data_size) @@ -288,28 +313,6 @@ efivar_variable_is_removable(efi_guid_t vendor, const char *var_name, return found; } -static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, - struct list_head *head) -{ - struct efivar_entry *entry, *n; - unsigned long strsize1, strsize2; - bool found = false; - - strsize1 = ucs2_strsize(variable_name, EFI_VAR_NAME_LEN); - list_for_each_entry_safe(entry, n, head, list) { - strsize2 = ucs2_strsize(entry->var.VariableName, EFI_VAR_NAME_LEN); - if (strsize1 == strsize2 && - !memcmp(variable_name, &(entry->var.VariableName), - strsize2) && - !efi_guidcmp(entry->var.VendorGuid, - *vendor)) { - found = true; - break; - } - } - return found; -} - /* * Returns the size of variable_name, in bytes, including the * terminating NULL character, or variable_name_size if no NULL @@ -361,16 +364,15 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid, * efivar_init - build the initial list of EFI variables * @func: callback function to invoke for every variable * @data: function-specific data to pass to @func - * @head: initialised head of variable list + * @duplicate_check: fail if a duplicate variable is found * * Get every EFI variable from the firmware and invoke @func. @func - * should call efivar_entry_add() to build the list of variables. + * should populate the initial dentry and inode tree. * * Returns 0 on success, or a kernel error code on failure. */ -int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *, - struct list_head *), - void *data, struct list_head *head) +int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), + void *data, bool duplicate_check) { unsigned long variable_name_size = 512; efi_char16_t *variable_name; @@ -414,14 +416,15 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *, * we'll ever see a different variable name, * and may end up looping here forever. */ - if (variable_is_present(variable_name, &vendor_guid, - head)) { + if (duplicate_check && + efivarfs_variable_is_present(variable_name, + &vendor_guid, data)) { dup_variable_bug(variable_name, &vendor_guid, variable_name_size); status = EFI_NOT_FOUND; } else { err = func(variable_name, vendor_guid, - variable_name_size, data, head); + variable_name_size, data); if (err) status = EFI_NOT_FOUND; } @@ -453,70 +456,12 @@ free: } /** - * efivar_entry_add - add entry to variable list - * @entry: entry to add to list - * @head: list head - * - * Returns 0 on success, or a kernel error code on failure. - */ -int efivar_entry_add(struct efivar_entry *entry, struct list_head *head) -{ - int err; - - err = efivar_lock(); - if (err) - return err; - list_add(&entry->list, head); - efivar_unlock(); - - return 0; -} - -/** - * __efivar_entry_add - add entry to variable list - * @entry: entry to add to list - * @head: list head - */ -void __efivar_entry_add(struct efivar_entry *entry, struct list_head *head) -{ - list_add(&entry->list, head); -} - -/** - * efivar_entry_remove - remove entry from variable list - * @entry: entry to remove from list - * - * Returns 0 on success, or a kernel error code on failure. 
- */ -void efivar_entry_remove(struct efivar_entry *entry) -{ - list_del(&entry->list); -} - -/* - * efivar_entry_list_del_unlock - remove entry from variable list - * @entry: entry to remove - * - * Remove @entry from the variable list and release the list lock. - * - * NOTE: slightly weird locking semantics here - we expect to be - * called with the efivars lock already held, and we release it before - * returning. This is because this function is usually called after - * set_variable() while the lock is still held. - */ -static void efivar_entry_list_del_unlock(struct efivar_entry *entry) -{ - list_del(&entry->list); - efivar_unlock(); -} - -/** - * efivar_entry_delete - delete variable and remove entry from list + * efivar_entry_delete - delete variable * @entry: entry containing variable to delete * - * Delete the variable from the firmware and remove @entry from the - * variable list. It is the caller's responsibility to free @entry - * once we return. + * Delete the variable from the firmware. It is the caller's + * responsibility to free @entry (by deleting the dentry/inode) once + * we return. * * Returns 0 on success, -EINTR if we can't grab the semaphore, * converted EFI status code if set_variable() fails. @@ -533,12 +478,10 @@ int efivar_entry_delete(struct efivar_entry *entry) status = efivar_set_variable_locked(entry->var.VariableName, &entry->var.VendorGuid, 0, 0, NULL, false); - if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) { - efivar_unlock(); + efivar_unlock(); + if (!(status == EFI_SUCCESS || status == EFI_NOT_FOUND)) return efi_status_to_err(status); - } - efivar_entry_list_del_unlock(entry); return 0; } @@ -632,7 +575,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, * get_variable() fail. * * If the EFI variable does not exist when calling set_variable() - * (EFI_NOT_FOUND), @entry is removed from the variable list. + * (EFI_NOT_FOUND). */ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, unsigned long *size, void *data, bool *set) @@ -648,9 +591,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, return -EINVAL; /* - * The lock here protects the get_variable call, the conditional - * set_variable call, and removal of the variable from the efivars - * list (in the case of an authenticated delete). + * The lock here protects the get_variable call and the + * conditional set_variable call */ err = efivar_lock(); if (err) @@ -676,10 +618,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, &entry->var.VendorGuid, NULL, size, NULL); - if (status == EFI_NOT_FOUND) - efivar_entry_list_del_unlock(entry); - else - efivar_unlock(); + efivar_unlock(); if (status && status != EFI_BUFFER_TOO_SMALL) return efi_status_to_err(status); @@ -691,37 +630,3 @@ out: return err; } - -/** - * efivar_entry_iter - iterate over variable list - * @func: callback function - * @head: head of variable list - * @data: function-specific data to pass to callback - * - * Iterate over the list of EFI variables and call @func with every - * entry on the list. It is safe for @func to remove entries in the - * list via efivar_entry_delete() while iterating. 
- * - * Some notes for the callback function: - * - a non-zero return value indicates an error and terminates the loop - * - @func is called from atomic context - */ -int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data) -{ - struct efivar_entry *entry, *n; - int err = 0; - - err = efivar_lock(); - if (err) - return err; - - list_for_each_entry_safe(entry, n, head, list) { - err = func(entry, data); - if (err) - break; - } - efivar_unlock(); - - return err; -} diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 7bfe251680ec..65ff39401020 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -29,29 +29,8 @@ struct z_erofs_decompressor { char *name; }; -/* some special page->private (unsigned long, see below) */ #define Z_EROFS_SHORTLIVED_PAGE (-1UL << 2) -#define Z_EROFS_PREALLOCATED_PAGE (-2UL << 2) - -/* - * For all pages in a pcluster, page->private should be one of - * Type Last 2bits page->private - * short-lived page 00 Z_EROFS_SHORTLIVED_PAGE - * preallocated page (tryalloc) 00 Z_EROFS_PREALLOCATED_PAGE - * cached/managed page 00 pointer to z_erofs_pcluster - * online page (file-backed, 01/10/11 sub-index << 2 | count - * some pages can be used for inplace I/O) - * - * page->mapping should be one of - * Type page->mapping - * short-lived page NULL - * preallocated page NULL - * cached/managed page non-NULL or NULL (invalidated/truncated page) - * online page non-NULL - * - * For all managed pages, PG_private should be set with 1 extra refcount, - * which is used for page reclaim / migration. - */ +#define Z_EROFS_PREALLOCATED_FOLIO ((void *)(-2UL << 2)) /* * Currently, short-lived pages are pages directly from buddy system diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index eb318c7ddd80..2b123b070a42 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -7,14 +7,7 @@ #include "compress.h" #include <linux/lz4.h> -#ifndef LZ4_DISTANCE_MAX /* history window size */ -#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - #define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1) -#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN -#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) -#endif struct z_erofs_lz4_decompress_ctx { struct z_erofs_decompress_req *rq; diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index c8f2ae845bd2..199395ed1c1f 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -9,6 +9,7 @@ #ifndef __EROFS_FS_H #define __EROFS_FS_H +/* to allow for x86 boot sectors and other oddities. 
*/ #define EROFS_SUPER_OFFSET 1024 #define EROFS_FEATURE_COMPAT_SB_CHKSUM 0x00000001 @@ -54,7 +55,7 @@ struct erofs_deviceslot { /* erofs on-disk super block (currently 128 bytes) */ struct erofs_super_block { __le32 magic; /* file system magic number */ - __le32 checksum; /* crc32c(super_block) */ + __le32 checksum; /* crc32c to avoid unexpected on-disk overlap */ __le32 feature_compat; __u8 blkszbits; /* filesystem block size in bit shift */ __u8 sb_extslots; /* superblock size = 128 + sb_extslots * 16 */ diff --git a/fs/erofs/fileio.c b/fs/erofs/fileio.c index 33f8539dda4a..0ffd1c63beeb 100644 --- a/fs/erofs/fileio.c +++ b/fs/erofs/fileio.c @@ -6,7 +6,7 @@ #include <trace/events/erofs.h> struct erofs_fileio_rq { - struct bio_vec bvecs[BIO_MAX_VECS]; + struct bio_vec bvecs[16]; struct bio bio; struct kiocb iocb; struct super_block *sb; @@ -68,7 +68,7 @@ static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev) struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq), GFP_KERNEL | __GFP_NOFAIL); - bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ); + bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ); rq->iocb.ki_filp = mdev->m_dif->file; rq->sb = mdev->m_sb; return rq; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index f5956474bfde..827b62665649 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -39,29 +39,21 @@ void _erofs_printk(struct super_block *sb, const char *fmt, ...) static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata) { - size_t len = 1 << EROFS_SB(sb)->blkszbits; - struct erofs_super_block *dsb; - u32 expected_crc, crc; + struct erofs_super_block *dsb = sbdata + EROFS_SUPER_OFFSET; + u32 len = 1 << EROFS_SB(sb)->blkszbits, crc; if (len > EROFS_SUPER_OFFSET) len -= EROFS_SUPER_OFFSET; + len -= offsetof(struct erofs_super_block, checksum) + + sizeof(dsb->checksum); - dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL); - if (!dsb) - return -ENOMEM; - - expected_crc = le32_to_cpu(dsb->checksum); - dsb->checksum = 0; - /* to allow for x86 boot sectors and other oddities. 
*/ - crc = crc32c(~0, dsb, len); - kfree(dsb); - - if (crc != expected_crc) { - erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected", - crc, expected_crc); - return -EBADMSG; - } - return 0; + /* skip .magic(pre-verified) and .checksum(0) fields */ + crc = crc32c(0x5045B54A, (&dsb->checksum) + 1, len); + if (crc == le32_to_cpu(dsb->checksum)) + return 0; + erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected", + crc, le32_to_cpu(dsb->checksum)); + return -EBADMSG; } static void erofs_inode_init_once(void *ptr) @@ -516,8 +508,6 @@ static int erofs_fc_parse_param(struct fs_context *fc, errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name); #endif break; - default: - return -ENOPARAM; } return 0; } diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index a90d7d649739..df2777e05661 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -407,7 +407,7 @@ int erofs_getxattr(struct inode *inode, int index, const char *name, } it.index = index; - it.name = (struct qstr)QSTR_INIT(name, strlen(name)); + it.name = QSTR(name); if (it.name.len > EROFS_NAME_LEN) return -ERANGE; @@ -478,7 +478,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb) if (!sbi->xattr_prefix_count) return 0; - pfs = kzalloc(sbi->xattr_prefix_count * sizeof(*pfs), GFP_KERNEL); + pfs = kcalloc(sbi->xattr_prefix_count, sizeof(*pfs), GFP_KERNEL); if (!pfs) return -ENOMEM; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 19ef4ff2a134..d771e06db738 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -12,12 +12,6 @@ #define Z_EROFS_PCLUSTER_MAX_PAGES (Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE) #define Z_EROFS_INLINE_BVECS 2 -/* - * let's leave a type here in case of introducing - * another tagged pointer later. - */ -typedef void *z_erofs_next_pcluster_t; - struct z_erofs_bvec { struct page *page; int offset; @@ -48,7 +42,7 @@ struct z_erofs_pcluster { struct lockref lockref; /* A: point to next chained pcluster or TAILs */ - z_erofs_next_pcluster_t next; + struct z_erofs_pcluster *next; /* I: start block address of this pcluster */ erofs_off_t index; @@ -94,12 +88,11 @@ struct z_erofs_pcluster { /* the end of a chain of pclusters */ #define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA) -#define Z_EROFS_PCLUSTER_NIL (NULL) struct z_erofs_decompressqueue { struct super_block *sb; + struct z_erofs_pcluster *head; atomic_t pending_bios; - z_erofs_next_pcluster_t head; union { struct completion done; @@ -320,7 +313,7 @@ static void erofs_destroy_percpu_workers(void) static struct kthread_worker *erofs_init_percpu_worker(int cpu) { struct kthread_worker *worker = - kthread_create_worker_on_cpu(cpu, 0, "erofs_worker/%u", cpu); + kthread_run_worker_on_cpu(cpu, 0, "erofs_worker/%u"); if (IS_ERR(worker)) return worker; @@ -462,39 +455,32 @@ err_decompressor: } enum z_erofs_pclustermode { + /* It has previously been linked into another processing chain */ Z_EROFS_PCLUSTER_INFLIGHT, /* - * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it - * could be dispatched into bypass queue later due to uptodated managed - * pages. All related online pages cannot be reused for inplace I/O (or - * bvpage) since it can be directly decoded without I/O submission. + * A weaker form of Z_EROFS_PCLUSTER_FOLLOWED; the difference is that it + * may be dispatched to the bypass queue later due to uptodated managed + * folios. 
All file-backed folios related to this pcluster cannot be + * reused for in-place I/O (or bvpage) since the pcluster may be decoded + * in a separate queue (and thus out of order). */ Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE, /* - * The pcluster was just linked to a decompression chain by us. It can - * also be linked with the remaining pclusters, which means if the - * processing page is the tail page of a pcluster, this pcluster can - * safely use the whole page (since the previous pcluster is within the - * same chain) for in-place I/O, as illustrated below: - * ___________________________________________________ - * | tail (partial) page | head (partial) page | - * | (of the current pcl) | (of the previous pcl) | - * |___PCLUSTER_FOLLOWED___|_____PCLUSTER_FOLLOWED_____| - * - * [ (*) the page above can be used as inplace I/O. ] + * The pcluster has just been linked to our processing chain. + * File-backed folios (except for the head page) related to it can be + * used for in-place I/O (or bvpage). */ Z_EROFS_PCLUSTER_FOLLOWED, }; -struct z_erofs_decompress_frontend { +struct z_erofs_frontend { struct inode *const inode; struct erofs_map_blocks map; struct z_erofs_bvec_iter biter; struct page *pagepool; struct page *candidate_bvpage; - struct z_erofs_pcluster *pcl; - z_erofs_next_pcluster_t owned_head; + struct z_erofs_pcluster *pcl, *head; enum z_erofs_pclustermode mode; erofs_off_t headoffset; @@ -503,11 +489,11 @@ struct z_erofs_decompress_frontend { unsigned int icur; }; -#define DECOMPRESS_FRONTEND_INIT(__i) { \ - .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \ - .mode = Z_EROFS_PCLUSTER_FOLLOWED } +#define Z_EROFS_DEFINE_FRONTEND(fe, i, ho) struct z_erofs_frontend fe = { \ + .inode = i, .head = Z_EROFS_PCLUSTER_TAIL, \ + .mode = Z_EROFS_PCLUSTER_FOLLOWED, .headoffset = ho } -static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) +static bool z_erofs_should_alloc_cache(struct z_erofs_frontend *fe) { unsigned int cachestrategy = EROFS_I_SB(fe->inode)->opt.cache_strategy; @@ -524,19 +510,17 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe) return false; } -static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) +static void z_erofs_bind_cache(struct z_erofs_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool shouldalloc = z_erofs_should_alloc_cache(fe); - bool standalone = true; - /* - * optimistic allocation without direct reclaim since inplace I/O - * can be used if low memory otherwise. 
- */ + bool may_bypass = true; + /* Optimistic allocation, as in-place I/O can be used as a fallback */ gfp_t gfp = (mapping_gfp_mask(mc) & ~__GFP_DIRECT_RECLAIM) | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; + struct folio *folio, *newfolio; unsigned int i; if (i_blocksize(fe->inode) != PAGE_SIZE || @@ -544,47 +528,42 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) return; for (i = 0; i < pclusterpages; ++i) { - struct page *page, *newpage; - /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; - page = find_get_page(mc, pcl->index + i); - if (!page) { - /* I/O is needed, no possible to decompress directly */ - standalone = false; + folio = filemap_get_folio(mc, pcl->index + i); + if (IS_ERR(folio)) { + may_bypass = false; if (!shouldalloc) continue; /* - * Try cached I/O if allocation succeeds or fallback to - * in-place I/O instead to avoid any direct reclaim. + * Allocate a managed folio for cached I/O, or it may be + * then filled with a file-backed folio for in-place I/O */ - newpage = erofs_allocpage(&fe->pagepool, gfp); - if (!newpage) + newfolio = filemap_alloc_folio(gfp, 0); + if (!newfolio) continue; - set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); + newfolio->private = Z_EROFS_PREALLOCATED_FOLIO; + folio = NULL; } spin_lock(&pcl->lockref.lock); if (!pcl->compressed_bvecs[i].page) { - pcl->compressed_bvecs[i].page = page ? page : newpage; + pcl->compressed_bvecs[i].page = + folio_page(folio ?: newfolio, 0); spin_unlock(&pcl->lockref.lock); continue; } spin_unlock(&pcl->lockref.lock); - - if (page) - put_page(page); - else if (newpage) - erofs_pagepool_add(&fe->pagepool, newpage); + folio_put(folio ?: newfolio); } /* - * don't do inplace I/O if all compressed pages are available in - * managed cache since it can be moved to the bypass queue instead. + * Don't perform in-place I/O if all compressed pages are available in + * the managed cache, as the pcluster can be moved to the bypass queue. 
*/ - if (standalone) + if (may_bypass) fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; } @@ -681,7 +660,7 @@ int erofs_init_managed_cache(struct super_block *sb) } /* callers must be with pcluster lock held */ -static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, +static int z_erofs_attach_page(struct z_erofs_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { struct z_erofs_pcluster *pcl = fe->pcl; @@ -727,7 +706,7 @@ static bool z_erofs_get_pcluster(struct z_erofs_pcluster *pcl) return true; } -static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) +static int z_erofs_register_pcluster(struct z_erofs_frontend *fe) { struct erofs_map_blocks *map = &fe->map; struct super_block *sb = fe->inode->i_sb; @@ -747,14 +726,11 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) if (IS_ERR(pcl)) return PTR_ERR(pcl); - spin_lock_init(&pcl->lockref.lock); - pcl->lockref.count = 1; /* one ref for this request */ + lockref_init(&pcl->lockref); /* one ref for this request */ pcl->algorithmformat = map->m_algorithmformat; pcl->length = 0; pcl->partial = true; - - /* new pclusters should be claimed as type 1, primary and followed */ - pcl->next = fe->owned_head; + pcl->next = fe->head; pcl->pageofs_out = map->m_la & ~PAGE_MASK; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; @@ -790,8 +766,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) goto err_out; } } - fe->owned_head = &pcl->next; - fe->pcl = pcl; + fe->head = fe->pcl = pcl; return 0; err_out: @@ -800,7 +775,7 @@ err_out: return err; } -static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) +static int z_erofs_pcluster_begin(struct z_erofs_frontend *fe) { struct erofs_map_blocks *map = &fe->map; struct super_block *sb = fe->inode->i_sb; @@ -810,7 +785,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) DBG_BUGON(fe->pcl); /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ - DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); + DBG_BUGON(!fe->head); if (!(map->m_flags & EROFS_MAP_META)) { while (1) { @@ -838,10 +813,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) if (ret == -EEXIST) { mutex_lock(&fe->pcl->lock); /* check if this pcluster hasn't been linked into any chain. */ - if (cmpxchg(&fe->pcl->next, Z_EROFS_PCLUSTER_NIL, - fe->owned_head) == Z_EROFS_PCLUSTER_NIL) { + if (!cmpxchg(&fe->pcl->next, NULL, fe->head)) { /* .. so it can be attached to our submission chain */ - fe->owned_head = &fe->pcl->next; + fe->head = fe->pcl; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED; } else { /* otherwise, it belongs to an inflight chain */ fe->mode = Z_EROFS_PCLUSTER_INFLIGHT; @@ -874,14 +848,9 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) return 0; } -/* - * keep in mind that no referenced pclusters will be freed - * only after a RCU grace period. 
- */ static void z_erofs_rcu_callback(struct rcu_head *head) { - z_erofs_free_pcluster(container_of(head, - struct z_erofs_pcluster, rcu)); + z_erofs_free_pcluster(container_of(head, struct z_erofs_pcluster, rcu)); } static bool __erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, @@ -923,12 +892,10 @@ static bool erofs_try_to_release_pcluster(struct erofs_sb_info *sbi, return free; } -unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, - unsigned long nr_shrink) +unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, unsigned long nr) { struct z_erofs_pcluster *pcl; - unsigned int freed = 0; - unsigned long index; + unsigned long index, freed = 0; xa_lock(&sbi->managed_pslots); xa_for_each(&sbi->managed_pslots, index, pcl) { @@ -938,7 +905,7 @@ unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi, xa_unlock(&sbi->managed_pslots); ++freed; - if (!--nr_shrink) + if (!--nr) return freed; xa_lock(&sbi->managed_pslots); } @@ -967,7 +934,7 @@ static void z_erofs_put_pcluster(struct erofs_sb_info *sbi, call_rcu(&pcl->rcu, z_erofs_rcu_callback); } -static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) +static void z_erofs_pcluster_end(struct z_erofs_frontend *fe) { struct z_erofs_pcluster *pcl = fe->pcl; @@ -980,13 +947,9 @@ static void z_erofs_pcluster_end(struct z_erofs_decompress_frontend *fe) if (fe->candidate_bvpage) fe->candidate_bvpage = NULL; - /* - * if all pending pages are added, don't hold its reference - * any longer if the pcluster isn't hosted by ourselves. - */ + /* Drop refcount if it doesn't belong to our processing chain */ if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE) z_erofs_put_pcluster(EROFS_I_SB(fe->inode), pcl, false); - fe->pcl = NULL; } @@ -1015,7 +978,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct folio *folio, return 0; } -static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f, +static int z_erofs_scan_folio(struct z_erofs_frontend *f, struct folio *folio, bool ra) { struct inode *const inode = f->inode; @@ -1130,7 +1093,7 @@ static bool z_erofs_page_is_invalidated(struct page *page) return !page_folio(page)->mapping && !z_erofs_is_shortlived_page(page); } -struct z_erofs_decompress_backend { +struct z_erofs_backend { struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES]; struct super_block *sb; struct z_erofs_pcluster *pcl; @@ -1150,7 +1113,7 @@ struct z_erofs_bvec_item { struct list_head list; }; -static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, +static void z_erofs_do_decompressed_bvec(struct z_erofs_backend *be, struct z_erofs_bvec *bvec) { struct z_erofs_bvec_item *item; @@ -1173,8 +1136,7 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, list_add(&item->list, &be->decompressed_secondary_bvecs); } -static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, - int err) +static void z_erofs_fill_other_copies(struct z_erofs_backend *be, int err) { unsigned int off0 = be->pcl->pageofs_out; struct list_head *p, *n; @@ -1215,7 +1177,7 @@ static void z_erofs_fill_other_copies(struct z_erofs_decompress_backend *be, } } -static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) +static void z_erofs_parse_out_bvecs(struct z_erofs_backend *be) { struct z_erofs_pcluster *pcl = be->pcl; struct z_erofs_bvec_iter biter; @@ -1240,8 +1202,7 @@ static void z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be) z_erofs_put_shortlivedpage(be->pagepool, old_bvpage); } -static int 
z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, - bool *overlapped) +static int z_erofs_parse_in_bvecs(struct z_erofs_backend *be, bool *overlapped) { struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); @@ -1276,8 +1237,7 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, return err; } -static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, - int err) +static int z_erofs_decompress_pcluster(struct z_erofs_backend *be, int err) { struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; @@ -1394,7 +1354,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, pcl->vcnt = 0; /* pcluster lock MUST be taken before the following line */ - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); + WRITE_ONCE(pcl->next, NULL); mutex_unlock(&pcl->lock); if (z_erofs_is_inline_pcluster(pcl)) @@ -1407,21 +1367,19 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, static int z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, struct page **pagepool) { - struct z_erofs_decompress_backend be = { + struct z_erofs_backend be = { .sb = io->sb, .pagepool = pagepool, .decompressed_secondary_bvecs = LIST_HEAD_INIT(be.decompressed_secondary_bvecs), + .pcl = io->head, }; - z_erofs_next_pcluster_t owned = io->head; + struct z_erofs_pcluster *next; int err = io->eio ? -EIO : 0; - while (owned != Z_EROFS_PCLUSTER_TAIL) { - DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); - - be.pcl = container_of(owned, struct z_erofs_pcluster, next); - owned = READ_ONCE(be.pcl->next); - + for (; be.pcl != Z_EROFS_PCLUSTER_TAIL; be.pcl = next) { + DBG_BUGON(!be.pcl); + next = READ_ONCE(be.pcl->next); err = z_erofs_decompress_pcluster(&be, err) ?: err; } return err; @@ -1487,7 +1445,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, } static void z_erofs_fill_bio_vec(struct bio_vec *bvec, - struct z_erofs_decompress_frontend *f, + struct z_erofs_frontend *f, struct z_erofs_pcluster *pcl, unsigned int nr, struct address_space *mc) @@ -1514,12 +1472,8 @@ repeat: DBG_BUGON(z_erofs_is_shortlived_page(bvec->bv_page)); folio = page_folio(zbv.page); - /* - * Handle preallocated cached folios. We tried to allocate such folios - * without triggering direct reclaim. If allocation failed, inplace - * file-backed folios will be used instead. 
- */ - if (folio->private == (void *)Z_EROFS_PREALLOCATED_PAGE) { + /* For preallocated managed folios, add them to page cache here */ + if (folio->private == Z_EROFS_PREALLOCATED_FOLIO) { tocache = true; goto out_tocache; } @@ -1631,18 +1585,13 @@ enum { NR_JOBQUEUES, }; -static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t qtail[], - z_erofs_next_pcluster_t owned_head) +static void z_erofs_move_to_bypass_queue(struct z_erofs_pcluster *pcl, + struct z_erofs_pcluster *next, + struct z_erofs_pcluster **qtail[]) { - z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; - z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL); - - WRITE_ONCE(*submit_qtail, owned_head); - WRITE_ONCE(*bypass_qtail, &pcl->next); - + WRITE_ONCE(*qtail[JQ_SUBMIT], next); + WRITE_ONCE(*qtail[JQ_BYPASS], pcl); qtail[JQ_BYPASS] = &pcl->next; } @@ -1671,15 +1620,15 @@ static void z_erofs_endio(struct bio *bio) bio_put(bio); } -static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, +static void z_erofs_submit_queue(struct z_erofs_frontend *f, struct z_erofs_decompressqueue *fgq, bool *force_fg, bool readahead) { struct super_block *sb = f->inode->i_sb; struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb)); - z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; + struct z_erofs_pcluster **qtail[NR_JOBQUEUES]; struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; - z_erofs_next_pcluster_t owned_head = f->owned_head; + struct z_erofs_pcluster *pcl, *next; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ erofs_off_t last_pa; unsigned int nr_bios = 0; @@ -1695,22 +1644,19 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; /* by default, all need io submission */ - q[JQ_SUBMIT]->head = owned_head; + q[JQ_SUBMIT]->head = next = f->head; do { struct erofs_map_dev mdev; - struct z_erofs_pcluster *pcl; erofs_off_t cur, end; struct bio_vec bvec; unsigned int i = 0; bool bypass = true; - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); - pcl = container_of(owned_head, struct z_erofs_pcluster, next); - owned_head = READ_ONCE(pcl->next); - + pcl = next; + next = READ_ONCE(pcl->next); if (z_erofs_is_inline_pcluster(pcl)) { - move_to_bypass_jobqueue(pcl, qtail, owned_head); + z_erofs_move_to_bypass_queue(pcl, next, qtail); continue; } @@ -1782,8 +1728,8 @@ drain_io: if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; else - move_to_bypass_jobqueue(pcl, qtail, owned_head); - } while (owned_head != Z_EROFS_PCLUSTER_TAIL); + z_erofs_move_to_bypass_queue(pcl, next, qtail); + } while (next != Z_EROFS_PCLUSTER_TAIL); if (bio) { if (erofs_is_fileio_mode(EROFS_SB(sb))) @@ -1807,17 +1753,16 @@ drain_io: z_erofs_decompress_kickoff(q[JQ_SUBMIT], nr_bios); } -static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f, - unsigned int ra_folios) +static int z_erofs_runqueue(struct z_erofs_frontend *f, unsigned int rapages) { struct z_erofs_decompressqueue io[NR_JOBQUEUES]; struct erofs_sb_info *sbi = EROFS_I_SB(f->inode); - bool force_fg = z_erofs_is_sync_decompress(sbi, ra_folios); + bool force_fg = z_erofs_is_sync_decompress(sbi, rapages); int err; - if (f->owned_head == Z_EROFS_PCLUSTER_TAIL) + if (f->head == Z_EROFS_PCLUSTER_TAIL) return 0; - z_erofs_submit_queue(f, io, &force_fg, !!ra_folios); + z_erofs_submit_queue(f, io, &force_fg, !!rapages); /* handle bypass queue (no i/o pclusters) immediately */ err = z_erofs_decompress_queue(&io[JQ_BYPASS], 
&f->pagepool); @@ -1835,7 +1780,7 @@ static int z_erofs_runqueue(struct z_erofs_decompress_frontend *f, * Since partial uptodate is still unimplemented for now, we have to use * approximate readmore strategies as a start. */ -static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, +static void z_erofs_pcluster_readmore(struct z_erofs_frontend *f, struct readahead_control *rac, bool backmost) { struct inode *inode = f->inode; @@ -1890,12 +1835,10 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, static int z_erofs_read_folio(struct file *file, struct folio *folio) { struct inode *const inode = folio->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + Z_EROFS_DEFINE_FRONTEND(f, inode, folio_pos(folio)); int err; trace_erofs_read_folio(folio, false); - f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; - z_erofs_pcluster_readmore(&f, NULL, true); err = z_erofs_scan_folio(&f, folio, false); z_erofs_pcluster_readmore(&f, NULL, false); @@ -1915,17 +1858,14 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) static void z_erofs_readahead(struct readahead_control *rac) { struct inode *const inode = rac->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + Z_EROFS_DEFINE_FRONTEND(f, inode, readahead_pos(rac)); struct folio *head = NULL, *folio; - unsigned int nr_folios; + unsigned int nrpages = readahead_count(rac); int err; - f.headoffset = readahead_pos(rac); - z_erofs_pcluster_readmore(&f, rac, true); - nr_folios = readahead_count(rac); - trace_erofs_readpages(inode, readahead_index(rac), nr_folios, false); - + nrpages = readahead_count(rac); + trace_erofs_readpages(inode, readahead_index(rac), nrpages, false); while ((folio = readahead_folio(rac))) { folio->private = head; head = folio; @@ -1944,7 +1884,7 @@ static void z_erofs_readahead(struct readahead_control *rac) z_erofs_pcluster_readmore(&f, rac, false); z_erofs_pcluster_end(&f); - (void)z_erofs_runqueue(&f, nr_folios); + (void)z_erofs_runqueue(&f, nrpages); erofs_put_metabuf(&f.map.buf); erofs_release_pages(&f.pagepool); } diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 4535f2f0a014..689437e99a5a 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -97,17 +97,48 @@ static int get_compacted_la_distance(unsigned int lobits, return d1; } -static int unpack_compacted_index(struct z_erofs_maprecorder *m, - unsigned int amortizedshift, - erofs_off_t pos, bool lookahead) +static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, + unsigned long lcn, bool lookahead) { - struct erofs_inode *const vi = EROFS_I(m->inode); + struct inode *const inode = m->inode; + struct erofs_inode *const vi = EROFS_I(inode); + const erofs_off_t ebase = sizeof(struct z_erofs_map_header) + + ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); const unsigned int lclusterbits = vi->z_logical_clusterbits; + const unsigned int totalidx = erofs_iblks(inode); + unsigned int compacted_4b_initial, compacted_2b, amortizedshift; unsigned int vcnt, lo, lobits, encodebits, nblk, bytes; - bool big_pcluster; + bool big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; + erofs_off_t pos; u8 *in, type; int i; + if (lcn >= totalidx || lclusterbits > 14) + return -EINVAL; + + m->lcn = lcn; + /* used to align to 32-byte (compacted_2b) alignment */ + compacted_4b_initial = ((32 - ebase % 32) / 4) & 7; + compacted_2b = 0; + if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) && + 
compacted_4b_initial < totalidx) + compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); + + pos = ebase; + amortizedshift = 2; /* compact_4b */ + if (lcn >= compacted_4b_initial) { + pos += compacted_4b_initial * 4; + lcn -= compacted_4b_initial; + if (lcn < compacted_2b) { + amortizedshift = 1; + } else { + pos += compacted_2b * 2; + lcn -= compacted_2b; + } + } + pos += lcn * (1 << amortizedshift); + + /* figure out the lcluster count in this pack */ if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; else if (1 << amortizedshift == 2 && lclusterbits <= 12) @@ -122,7 +153,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, /* it doesn't equal to round_up(..) */ m->nextpackoff = round_down(pos, vcnt << amortizedshift) + (vcnt << amortizedshift); - big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; bytes = pos & ((vcnt << amortizedshift) - 1); @@ -207,53 +237,6 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, return 0; } -static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m, - unsigned long lcn, bool lookahead) -{ - struct inode *const inode = m->inode; - struct erofs_inode *const vi = EROFS_I(inode); - const erofs_off_t ebase = sizeof(struct z_erofs_map_header) + - ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8); - unsigned int totalidx = erofs_iblks(inode); - unsigned int compacted_4b_initial, compacted_2b; - unsigned int amortizedshift; - erofs_off_t pos; - - if (lcn >= totalidx || vi->z_logical_clusterbits > 14) - return -EINVAL; - - m->lcn = lcn; - /* used to align to 32-byte (compacted_2b) alignment */ - compacted_4b_initial = (32 - ebase % 32) / 4; - if (compacted_4b_initial == 32 / 4) - compacted_4b_initial = 0; - - if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) && - compacted_4b_initial < totalidx) - compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); - else - compacted_2b = 0; - - pos = ebase; - if (lcn < compacted_4b_initial) { - amortizedshift = 2; - goto out; - } - pos += compacted_4b_initial * 4; - lcn -= compacted_4b_initial; - - if (lcn < compacted_2b) { - amortizedshift = 1; - goto out; - } - pos += compacted_2b * 2; - lcn -= compacted_2b; - amortizedshift = 2; -out: - pos += lcn * (1 << amortizedshift); - return unpack_compacted_index(m, amortizedshift, pos, lookahead); -} - static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m, unsigned int lcn, bool lookahead) { @@ -311,27 +294,23 @@ err_bogus: static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, unsigned int initial_lcn) { - struct super_block *sb = m->inode->i_sb; - struct erofs_inode *const vi = EROFS_I(m->inode); - struct erofs_map_blocks *const map = m->map; - const unsigned int lclusterbits = vi->z_logical_clusterbits; - unsigned long lcn; + struct inode *inode = m->inode; + struct super_block *sb = inode->i_sb; + struct erofs_inode *vi = EROFS_I(inode); + bool bigpcl1 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; + bool bigpcl2 = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2; + unsigned long lcn = m->lcn + 1; int err; - DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN && - m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 && - m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2); + DBG_BUGON(m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD); DBG_BUGON(m->type != m->headtype); - if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN || - ((m->headtype == 
Z_EROFS_LCLUSTER_TYPE_HEAD1) && - !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) || - ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && - !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) { - map->m_plen = 1ULL << lclusterbits; - return 0; - } - lcn = m->lcn + 1; + if ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1 && !bigpcl1) || + ((m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN || + m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !bigpcl2) || + (lcn << vi->z_logical_clusterbits) >= inode->i_size) + m->compressedblks = 1; + if (m->compressedblks) goto out; @@ -356,9 +335,9 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, case Z_EROFS_LCLUSTER_TYPE_HEAD2: /* * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type - * rather than CBLKCNT, it's a 1 lcluster-sized pcluster. + * rather than CBLKCNT, it's a 1 block-sized pcluster. */ - m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits); + m->compressedblks = 1; break; case Z_EROFS_LCLUSTER_TYPE_NONHEAD: if (m->delta[0] != 1) @@ -373,7 +352,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, return -EFSCORRUPTED; } out: - map->m_plen = erofs_pos(sb, m->compressedblks); + m->map->m_plen = erofs_pos(sb, m->compressedblks); return 0; err_bonus_cblkcnt: erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid); diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 0dd65cefce33..55ff2ab5128e 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages) tmp_pages[j] = gbuf->pages[j]; do { last = j; - j = alloc_pages_bulk_array(GFP_KERNEL, nrpages, - tmp_pages); + j = alloc_pages_bulk(GFP_KERNEL, nrpages, + tmp_pages); if (last == j) goto out; } while (j != nrpages); @@ -243,7 +243,7 @@ void erofs_shrinker_unregister(struct super_block *sb) static unsigned long erofs_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { - return atomic_long_read(&erofs_global_shrink_cnt); + return atomic_long_read(&erofs_global_shrink_cnt) ?: SHRINK_EMPTY; } static unsigned long erofs_shrink_scan(struct shrinker *shrink, diff --git a/fs/eventpoll.c b/fs/eventpoll.c index f9898e60dd8b..7c0980db77b3 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -318,7 +318,7 @@ static void unlist_file(struct epitems_head *head) static long long_zero; static long long_max = LONG_MAX; -static struct ctl_table epoll_table[] = { +static const struct ctl_table epoll_table[] = { { .procname = "max_user_watches", .data = &max_user_watches, diff --git a/fs/exec.c b/fs/exec.c index 98cb7ba9983c..506cd411f4ac 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -205,18 +205,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, /* * Avoid relying on expanding the stack down in GUP (which * does not work for STACK_GROWSUP anyway), and just do it - * by hand ahead of time. + * ahead of time. */ - if (write && pos < vma->vm_start) { - mmap_write_lock(mm); - ret = expand_downwards(vma, pos); - if (unlikely(ret < 0)) { - mmap_write_unlock(mm); - return NULL; - } - mmap_write_downgrade(mm); - } else - mmap_read_lock(mm); + if (!mmap_read_lock_maybe_expand(mm, vma, pos, write)) + return NULL; /* * We are doing an exec(). 
'current' is the process @@ -892,7 +884,8 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) .lookup_flags = LOOKUP_FOLLOW, }; - if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) + if ((flags & + ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH | AT_EXECVE_CHECK)) != 0) return ERR_PTR(-EINVAL); if (flags & AT_SYMLINK_NOFOLLOW) open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW; @@ -912,7 +905,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) path_noexec(&file->f_path)) return ERR_PTR(-EACCES); - err = deny_write_access(file); + err = exe_file_deny_write_access(file); if (err) return ERR_PTR(err); @@ -927,7 +920,7 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) * Returns ERR_PTR on failure or allocated struct file on success. * * As this is a wrapper for the internal do_open_execat(), callers - * must call allow_write_access() before fput() on release. Also see + * must call exe_file_allow_write_access() before fput() on release. Also see * do_close_execat(). */ struct file *open_exec(const char *name) @@ -1194,16 +1187,16 @@ static int unshare_sighand(struct task_struct *me) } /* - * These functions flushes out all traces of the currently running executable - * so that a new one can be started + * This is unlocked -- the string will always be NUL-terminated, but + * may show overlapping contents if racing concurrent reads. */ - void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) { - task_lock(tsk); + size_t len = min(strlen(buf), sizeof(tsk->comm) - 1); + trace_task_rename(tsk, buf); - strscpy_pad(tsk->comm, buf, sizeof(tsk->comm)); - task_unlock(tsk); + memcpy(tsk->comm, buf, len); + memset(&tsk->comm[len], 0, sizeof(tsk->comm) - len); perf_event_comm(tsk, exec); } @@ -1341,7 +1334,28 @@ int begin_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, SUID_DUMP_USER); perf_event_exec(); - __set_task_comm(me, kbasename(bprm->filename), true); + + /* + * If the original filename was empty, alloc_bprm() made up a path + * that will probably not be useful to admins running ps or similar. + * Let's fix it up to be something reasonable. + */ + if (bprm->comm_from_dentry) { + /* + * Hold RCU lock to keep the name from being freed behind our back. + * Use acquire semantics to make sure the terminating NUL from + * __d_alloc() is seen. + * + * Note, we're deliberately sloppy here. We don't need to care about + * detecting a concurrent rename and just want a terminated name. + */ + rcu_read_lock(); + __set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name), + true); + rcu_read_unlock(); + } else { + __set_task_comm(me, kbasename(bprm->filename), true); + } /* An exec changes our domain. 
We are no longer part of the thread group */ @@ -1471,7 +1485,7 @@ static void do_close_execat(struct file *file) { if (!file) return; - allow_write_access(file); + exe_file_allow_write_access(file); fput(file); } @@ -1517,11 +1531,13 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl if (fd == AT_FDCWD || filename->name[0] == '/') { bprm->filename = filename->name; } else { - if (filename->name[0] == '\0') + if (filename->name[0] == '\0') { bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); - else + bprm->comm_from_dentry = 1; + } else { bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", fd, filename->name); + } if (!bprm->fdpath) goto out_free; @@ -1541,6 +1557,21 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl } bprm->interp = bprm->filename; + /* + * At this point, security_file_open() has already been called (with + * __FMODE_EXEC) and access control checks for AT_EXECVE_CHECK will + * stop just after the security_bprm_creds_for_exec() call in + * bprm_execve(). Indeed, the kernel should not try to parse the + * content of the file with exec_binprm() nor change the calling + * thread, which means that the following security functions will not + * be called: + * - security_bprm_check() + * - security_bprm_creds_from_file() + * - security_bprm_committing_creds() + * - security_bprm_committed_creds() + */ + bprm->is_check = !!(flags & AT_EXECVE_CHECK); + retval = bprm_mm_init(bprm); if (!retval) return bprm; @@ -1719,13 +1750,11 @@ int remove_arg_zero(struct linux_binprm *bprm) } EXPORT_SYMBOL(remove_arg_zero); -#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e)) /* * cycle the list of binary formats handler, until one recognizes the image */ static int search_binary_handler(struct linux_binprm *bprm) { - bool need_retry = IS_ENABLED(CONFIG_MODULES); struct linux_binfmt *fmt; int retval; @@ -1737,8 +1766,6 @@ static int search_binary_handler(struct linux_binprm *bprm) if (retval) return retval; - retval = -ENOENT; - retry: read_lock(&binfmt_lock); list_for_each_entry(fmt, &formats, lh) { if (!try_module_get(fmt->module)) @@ -1756,17 +1783,7 @@ static int search_binary_handler(struct linux_binprm *bprm) } read_unlock(&binfmt_lock); - if (need_retry) { - if (printable(bprm->buf[0]) && printable(bprm->buf[1]) && - printable(bprm->buf[2]) && printable(bprm->buf[3])) - return retval; - if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0) - return retval; - need_retry = false; - goto retry; - } - - return retval; + return -ENOEXEC; } /* binfmt handlers will call back into begin_new_exec() on success. 
*/ @@ -1797,7 +1814,7 @@ static int exec_binprm(struct linux_binprm *bprm) bprm->file = bprm->interpreter; bprm->interpreter = NULL; - allow_write_access(exec); + exe_file_allow_write_access(exec); if (unlikely(bprm->have_execfd)) { if (bprm->executable) { fput(exec); @@ -1836,7 +1853,7 @@ static int bprm_execve(struct linux_binprm *bprm) /* Set the unchanging part of bprm->cred */ retval = security_bprm_creds_for_exec(bprm); - if (retval) + if (retval || bprm->is_check) goto out; retval = exec_binprm(bprm); @@ -1904,9 +1921,6 @@ static int do_execveat_common(int fd, struct filename *filename, } retval = count(argv, MAX_ARG_STRINGS); - if (retval == 0) - pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", - current->comm, bprm->filename); if (retval < 0) goto out_free; bprm->argc = retval; @@ -1944,6 +1958,9 @@ static int do_execveat_common(int fd, struct filename *filename, if (retval < 0) goto out_free; bprm->argc = 1; + + pr_warn_once("process '%s' launched '%s' with NULL argv: empty string added\n", + current->comm, bprm->filename); } retval = bprm_execve(bprm); @@ -2142,7 +2159,7 @@ static int proc_dointvec_minmax_coredump(const struct ctl_table *table, int writ return error; } -static struct ctl_table fs_exec_sysctls[] = { +static const struct ctl_table fs_exec_sysctls[] = { { .procname = "suid_dumpable", .data = &suid_dumpable, diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 099f80645072..691dd77b6ab5 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -31,10 +31,9 @@ static inline void exfat_d_version_set(struct dentry *dentry, * If it happened, the negative dentry isn't actually negative anymore. So, * drop it. */ -static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags) +static int exfat_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - int ret; - if (flags & LOOKUP_RCU) return -ECHILD; @@ -58,11 +57,7 @@ static int exfat_d_revalidate(struct dentry *dentry, unsigned int flags) if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; - spin_lock(&dentry->d_lock); - ret = inode_eq_iversion(d_inode(dentry->d_parent), - exfat_d_version(dentry)); - spin_unlock(&dentry->d_lock); - return ret; + return inode_eq_iversion(dir, exfat_d_version(dentry)); } /* returns the length of a struct qstr, ignoring trailing dots if necessary */ diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index e20d59221fc0..c9ca41d91a6c 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -31,8 +31,7 @@ config EXT4_FS select BUFFER_HEAD select JBD2 select CRC16 - select CRYPTO - select CRYPTO_CRC32C + select CRC32 select FS_IOMAP select FS_ENCRYPTION_ALGS if FS_ENCRYPTION help diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 74f2071189b2..4e7de7eaa374 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -33,7 +33,7 @@ #include <linux/blockgroup_lock.h> #include <linux/percpu_counter.h> #include <linux/ratelimit.h> -#include <crypto/hash.h> +#include <linux/crc32c.h> #include <linux/falloc.h> #include <linux/percpu-rwsem.h> #include <linux/fiemap.h> @@ -1662,9 +1662,6 @@ struct ext4_sb_info { /* record the last minlen when FITRIM is called. 
*/ unsigned long s_last_trim_minblks; - /* Reference to checksum algorithm driver via cryptoapi */ - struct crypto_shash *s_chksum_driver; - /* Precomputed FS UUID checksum for seeding other checksums */ __u32 s_csum_seed; @@ -2463,19 +2460,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc, const void *address, unsigned int length) { - struct { - struct shash_desc shash; - char ctx[4]; - } desc; - - BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver)!=sizeof(desc.ctx)); - - desc.shash.tfm = sbi->s_chksum_driver; - *(u32 *)desc.ctx = crc; - - BUG_ON(crypto_shash_update(&desc.shash, address, length)); - - return *(u32 *)desc.ctx; + return crc32c(crc, address, length); } #ifdef __KERNEL__ @@ -3276,11 +3261,7 @@ extern int ext4_register_li_request(struct super_block *sb, static inline int ext4_has_metadata_csum(struct super_block *sb) { - WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) && - !EXT4_SB(sb)->s_chksum_driver); - - return ext4_has_feature_metadata_csum(sb) && - (EXT4_SB(sb)->s_chksum_driver != NULL); + return ext4_has_feature_metadata_csum(sb); } static inline int ext4_has_group_desc_csum(struct super_block *sb) diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c index 26c4fc37edcf..da4263a14a20 100644 --- a/fs/ext4/fast_commit.c +++ b/fs/ext4/fast_commit.c @@ -322,9 +322,7 @@ restart: WARN_ON(!list_empty(&ei->i_fc_dilist)); spin_unlock(&sbi->s_fc_lock); - if (fc_dentry->fcd_name.name && - fc_dentry->fcd_name.len > DNAME_INLINE_LEN) - kfree(fc_dentry->fcd_name.name); + release_dentry_name_snapshot(&fc_dentry->fcd_name); kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry); return; @@ -449,22 +447,7 @@ static int __track_dentry_update(handle_t *handle, struct inode *inode, node->fcd_op = dentry_update->op; node->fcd_parent = dir->i_ino; node->fcd_ino = inode->i_ino; - if (dentry->d_name.len > DNAME_INLINE_LEN) { - node->fcd_name.name = kmalloc(dentry->d_name.len, GFP_NOFS); - if (!node->fcd_name.name) { - kmem_cache_free(ext4_fc_dentry_cachep, node); - ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_NOMEM, handle); - mutex_lock(&ei->i_fc_lock); - return -ENOMEM; - } - memcpy((u8 *)node->fcd_name.name, dentry->d_name.name, - dentry->d_name.len); - } else { - memcpy(node->fcd_iname, dentry->d_name.name, - dentry->d_name.len); - node->fcd_name.name = node->fcd_iname; - } - node->fcd_name.len = dentry->d_name.len; + take_dentry_name_snapshot(&node->fcd_name, dentry); INIT_LIST_HEAD(&node->fcd_dilist); spin_lock(&sbi->s_fc_lock); if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING || @@ -832,7 +815,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc, { struct ext4_fc_dentry_info fcd; struct ext4_fc_tl tl; - int dlen = fc_dentry->fcd_name.len; + int dlen = fc_dentry->fcd_name.name.len; u8 *dst = ext4_fc_reserve_space(sb, EXT4_FC_TAG_BASE_LEN + sizeof(fcd) + dlen, crc); @@ -847,7 +830,7 @@ static bool ext4_fc_add_dentry_tlv(struct super_block *sb, u32 *crc, dst += EXT4_FC_TAG_BASE_LEN; memcpy(dst, &fcd, sizeof(fcd)); dst += sizeof(fcd); - memcpy(dst, fc_dentry->fcd_name.name, dlen); + memcpy(dst, fc_dentry->fcd_name.name.name, dlen); return true; } @@ -1328,9 +1311,7 @@ static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid) list_del_init(&fc_dentry->fcd_dilist); spin_unlock(&sbi->s_fc_lock); - if (fc_dentry->fcd_name.name && - fc_dentry->fcd_name.len > DNAME_INLINE_LEN) - kfree(fc_dentry->fcd_name.name); + release_dentry_name_snapshot(&fc_dentry->fcd_name); 
kmem_cache_free(ext4_fc_dentry_cachep, fc_dentry); spin_lock(&sbi->s_fc_lock); } diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h index 2fadb2c4780c..3bd534e4dbbf 100644 --- a/fs/ext4/fast_commit.h +++ b/fs/ext4/fast_commit.h @@ -109,8 +109,7 @@ struct ext4_fc_dentry_update { int fcd_op; /* Type of update create / unlink / link */ int fcd_parent; /* Parent inode number */ int fcd_ino; /* Inode number */ - struct qstr fcd_name; /* Dirent name */ - unsigned char fcd_iname[DNAME_INLINE_LEN]; /* Dirent name string */ + struct name_snapshot fcd_name; /* Dirent name */ struct list_head fcd_list; struct list_head fcd_dilist; }; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 3bd96c3d4cd0..a5205149adba 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -756,6 +756,9 @@ retry: return VM_FAULT_SIGBUS; } } else { + result = filemap_fsnotify_fault(vmf); + if (unlikely(result)) + return result; filemap_invalidate_lock_shared(mapping); } result = dax_iomap_fault(vmf, order, &pfn, &error, &ext4_iomap_ops); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 89aade6f45f6..7c54ae5fcbd4 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5006,10 +5006,11 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (IS_ENCRYPTED(inode)) { inode->i_op = &ext4_encrypted_symlink_inode_operations; } else if (ext4_inode_is_fast_symlink(inode)) { - inode->i_link = (char *)ei->i_data; inode->i_op = &ext4_fast_symlink_inode_operations; nd_terminate_link(ei->i_data, inode->i_size, sizeof(ei->i_data) - 1); + inode_set_cached_link(inode, (char *)ei->i_data, + inode->i_size); } else { inode->i_op = &ext4_symlink_inode_operations; } diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index bcf2737078b8..536d56d15072 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -3418,7 +3418,6 @@ retry: inode->i_op = &ext4_symlink_inode_operations; } else { inode->i_op = &ext4_fast_symlink_inode_operations; - inode->i_link = (char *)&EXT4_I(inode)->i_data; } } @@ -3434,6 +3433,9 @@ retry: disk_link.len); inode->i_size = disk_link.len - 1; EXT4_I(inode)->i_disksize = inode->i_size; + if (!IS_ENCRYPTED(inode)) + inode_set_cached_link(inode, (char *)&EXT4_I(inode)->i_data, + inode->i_size); } err = ext4_add_nondir(handle, dentry, &inode); if (handle) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 785809f33ff4..a50e5c31b937 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1380,8 +1380,6 @@ static void ext4_put_super(struct super_block *sb) */ kobject_put(&sbi->s_kobj); wait_for_completion(&sbi->s_kobj_unregister); - if (sbi->s_chksum_driver) - crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->s_blockgroup_lock); fs_put_dax(sbi->s_daxdev, NULL); fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); @@ -4634,15 +4632,6 @@ static int ext4_init_metadata_csum(struct super_block *sb, struct ext4_super_blo ext4_setup_csum_trigger(sb, EXT4_JTR_ORPHAN_FILE, ext4_orphan_file_block_trigger); - /* Load the checksum driver */ - sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); - if (IS_ERR(sbi->s_chksum_driver)) { - int ret = PTR_ERR(sbi->s_chksum_driver); - ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver."); - sbi->s_chksum_driver = NULL; - return ret; - } - /* Check superblock checksum */ if (!ext4_superblock_csum_verify(sb, es)) { ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with " @@ -5312,6 +5301,9 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) /* i_version is always enabled now */ sb->s_flags |= SB_I_VERSION; + /* HSM events are allowed by default. 
*/ + sb->s_iflags |= SB_I_ALLOW_HSM; + err = ext4_check_feature_compatibility(sb, es, silent); if (err) goto failed_mount; @@ -5687,9 +5679,6 @@ failed_mount3: del_timer_sync(&sbi->s_err_report); ext4_group_desc_free(sbi); failed_mount: - if (sbi->s_chksum_driver) - crypto_free_shash(sbi->s_chksum_driver); - #if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif @@ -7494,6 +7483,5 @@ static void __exit ext4_exit_fs(void) MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); MODULE_DESCRIPTION("Fourth Extended Filesystem"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: crc32c"); module_init(ext4_init_fs) module_exit(ext4_exit_fs) diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 68a1e23e1557..5916a02fb46d 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -4,8 +4,7 @@ config F2FS_FS depends on BLOCK select BUFFER_HEAD select NLS - select CRYPTO - select CRYPTO_CRC32 + select CRC32 select F2FS_FS_XATTR if FS_ENCRYPTION select FS_ENCRYPTION_ALGS if FS_ENCRYPTION select FS_IOMAP diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 7f26440e8595..985690d81a82 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -846,7 +846,7 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index) bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, int index, int nr_pages, bool uptodate) { - unsigned long pgidx = pages[index]->index; + unsigned long pgidx = page_folio(pages[index])->index; int i = uptodate ? 0 : 1; /* @@ -860,9 +860,11 @@ bool f2fs_all_cluster_page_ready(struct compress_ctx *cc, struct page **pages, return false; for (; i < cc->cluster_size; i++) { - if (pages[index + i]->index != pgidx + i) + struct folio *folio = page_folio(pages[index + i]); + + if (folio->index != pgidx + i) return false; - if (uptodate && !PageUptodate(pages[index + i])) + if (uptodate && !folio_test_uptodate(folio)) return false; } @@ -1195,7 +1197,8 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata, .cluster_size = F2FS_I(inode)->i_cluster_size, .rpages = fsdata, }; - bool first_index = (index == cc.rpages[0]->index); + struct folio *folio = page_folio(cc.rpages[0]); + bool first_index = (index == folio->index); if (copied) set_cluster_dirty(&cc); @@ -1239,13 +1242,14 @@ int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock) int i; for (i = cluster_size - 1; i >= 0; i--) { - loff_t start = rpages[i]->index << PAGE_SHIFT; + struct folio *folio = page_folio(rpages[i]); + loff_t start = folio->index << PAGE_SHIFT; if (from <= start) { - zero_user_segment(rpages[i], 0, PAGE_SIZE); + folio_zero_segment(folio, 0, folio_size(folio)); } else { - zero_user_segment(rpages[i], from - start, - PAGE_SIZE); + folio_zero_segment(folio, from - start, + folio_size(folio)); break; } } @@ -1278,6 +1282,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode) ? 
1 : 0, }; + struct folio *folio; struct dnode_of_data dn; struct node_info ni; struct compress_io_ctx *cic; @@ -1289,7 +1294,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, /* we should bypass data pages to proceed the kworker jobs */ if (unlikely(f2fs_cp_error(sbi))) { - mapping_set_error(cc->rpages[0]->mapping, -EIO); + mapping_set_error(inode->i_mapping, -EIO); goto out_free; } @@ -1316,7 +1321,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, goto out_put_dnode; } - psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT; + folio = page_folio(cc->rpages[last_index]); + psize = folio_pos(folio) + folio_size(folio); err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); if (err) @@ -1339,7 +1345,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, for (i = 0; i < cc->valid_nr_cpages; i++) { f2fs_set_compressed_page(cc->cpages[i], inode, - cc->rpages[i + 1]->index, cic); + page_folio(cc->rpages[i + 1])->index, cic); fio.compressed_page = cc->cpages[i]; fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page, @@ -1374,7 +1380,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, if (blkaddr == COMPRESS_ADDR) fio.compr_blocks++; if (__is_valid_data_blkaddr(blkaddr)) - f2fs_invalidate_blocks(sbi, blkaddr); + f2fs_invalidate_blocks(sbi, blkaddr, 1); f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR); goto unlock_continue; } @@ -1384,7 +1390,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, if (i > cc->valid_nr_cpages) { if (__is_valid_data_blkaddr(blkaddr)) { - f2fs_invalidate_blocks(sbi, blkaddr); + f2fs_invalidate_blocks(sbi, blkaddr, 1); f2fs_update_data_blkaddr(&dn, NEW_ADDR); } goto unlock_continue; @@ -1545,6 +1551,7 @@ continue_unlock: if (!clear_page_dirty_for_io(cc->rpages[i])) goto continue_unlock; + submitted = 0; ret = f2fs_write_single_data_page(page_folio(cc->rpages[i]), &submitted, NULL, NULL, wbc, io_type, @@ -1903,11 +1910,12 @@ struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi) return sbi->compress_inode->i_mapping; } -void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr) +void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, + block_t blkaddr, unsigned int len) { if (!sbi->compress_inode) return; - invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr); + invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr + len - 1); } void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a2478c2afb3a..de4da6d9cd93 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -70,9 +70,9 @@ bool f2fs_is_cp_guaranteed(struct page *page) return false; } -static enum count_type __read_io_type(struct page *page) +static enum count_type __read_io_type(struct folio *folio) { - struct address_space *mapping = page_file_mapping(page); + struct address_space *mapping = folio->mapping; if (mapping) { struct inode *inode = mapping->host; @@ -136,27 +136,22 @@ struct bio_post_read_ctx { */ static void f2fs_finish_read_bio(struct bio *bio, bool in_task) { - struct bio_vec *bv; - struct bvec_iter_all iter_all; + struct folio_iter fi; struct bio_post_read_ctx *ctx = bio->bi_private; - bio_for_each_segment_all(bv, bio, iter_all) { - struct page *page = bv->bv_page; + bio_for_each_folio_all(fi, bio) { + struct folio *folio = fi.folio; - if (f2fs_is_compressed_page(page)) { + if (f2fs_is_compressed_page(&folio->page)) { if (ctx && !ctx->decompression_attempted) - 
f2fs_end_read_compressed_page(page, true, 0, + f2fs_end_read_compressed_page(&folio->page, true, 0, in_task); - f2fs_put_page_dic(page, in_task); + f2fs_put_page_dic(&folio->page, in_task); continue; } - if (bio->bi_status) - ClearPageUptodate(page); - else - SetPageUptodate(page); - dec_page_count(F2FS_P_SB(page), __read_io_type(page)); - unlock_page(page); + dec_page_count(F2FS_F_SB(folio), __read_io_type(folio)); + folio_end_read(folio, bio->bi_status == 0); } if (ctx) @@ -516,10 +511,6 @@ static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { WARN_ON_ONCE(is_read_io(bio_op(bio))); - - if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type)) - blk_finish_plug(current->plug); - trace_f2fs_submit_write_bio(sbi->sb, type, bio); iostat_update_submit_ctx(bio, type); submit_bio(bio); @@ -689,33 +680,29 @@ void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi) int f2fs_submit_page_bio(struct f2fs_io_info *fio) { struct bio *bio; - struct page *page = fio->encrypted_page ? - fio->encrypted_page : fio->page; + struct folio *fio_folio = page_folio(fio->page); + struct folio *data_folio = fio->encrypted_page ? + page_folio(fio->encrypted_page) : fio_folio; if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, fio->is_por ? META_POR : (__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC_ENHANCE))) return -EFSCORRUPTED; - trace_f2fs_submit_page_bio(page, fio); + trace_f2fs_submit_folio_bio(data_folio, fio); /* Allocate a new bio */ bio = __bio_alloc(fio, 1); - f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, - page_folio(fio->page)->index, fio, GFP_NOIO); - - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { - bio_put(bio); - return -EFAULT; - } + f2fs_set_bio_crypt_ctx(bio, fio_folio->mapping->host, + fio_folio->index, fio, GFP_NOIO); + bio_add_folio_nofail(bio, data_folio, folio_size(data_folio), 0); if (fio->io_wbc && !is_read_io(fio->op)) - wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page), - PAGE_SIZE); + wbc_account_cgroup_owner(fio->io_wbc, fio_folio, PAGE_SIZE); inc_page_count(fio->sbi, is_read_io(fio->op) ? - __read_io_type(page) : WB_DATA_TYPE(fio->page, false)); + __read_io_type(data_folio) : WB_DATA_TYPE(fio->page, false)); if (is_read_io(bio_op(bio))) f2fs_submit_read_bio(fio->sbi, bio, fio->type); @@ -894,7 +881,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio) __is_meta_io(fio) ? 
META_GENERIC : DATA_GENERIC)) return -EFSCORRUPTED; - trace_f2fs_submit_page_bio(page, fio); + trace_f2fs_submit_folio_bio(page_folio(page), fio); if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, fio->new_blkaddr)) @@ -1018,7 +1005,7 @@ alloc_new: io->last_block_in_bio = fio->new_blkaddr; - trace_f2fs_submit_page_write(fio->page, fio); + trace_f2fs_submit_folio_write(page_folio(fio->page), fio); #ifdef CONFIG_BLK_DEV_ZONED if (f2fs_sb_has_blkzoned(sbi) && btype < META && is_end_zone_blkaddr(sbi, fio->new_blkaddr)) { @@ -1289,7 +1276,7 @@ struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index, struct address_space *mapping = inode->i_mapping; struct page *page; - page = find_get_page(mapping, index); + page = find_get_page_flags(mapping, index, FGP_ACCESSED); if (page && PageUptodate(page)) return page; f2fs_put_page(page, 0); @@ -1423,7 +1410,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) return err; if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) - f2fs_invalidate_internal_cache(sbi, old_blkaddr); + f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1); f2fs_update_data_blkaddr(dn, dn->data_blkaddr); return 0; @@ -2464,7 +2451,7 @@ next_page: static int f2fs_read_data_folio(struct file *file, struct folio *folio) { - struct inode *inode = folio_file_mapping(folio)->host; + struct inode *inode = folio->mapping->host; int ret = -EAGAIN; trace_f2fs_readpage(folio, DATA); @@ -3163,6 +3150,7 @@ continue_unlock: continue; } #endif + submitted = 0; ret = f2fs_write_single_data_page(folio, &submitted, &bio, &last_block, wbc, io_type, 0, true); @@ -4043,7 +4031,6 @@ retry: cur_lblock = 1; /* force Empty message */ sis->max = cur_lblock; sis->pages = cur_lblock - 1; - sis->highest_bit = cur_lblock - 1; out: if (not_aligned) f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)", diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 47a5c806cf16..54dd52de7269 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -175,7 +175,8 @@ static unsigned long dir_block_index(unsigned int level, static struct f2fs_dir_entry *find_in_block(struct inode *dir, struct page *dentry_page, const struct f2fs_filename *fname, - int *max_slots) + int *max_slots, + bool use_hash) { struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_ptr d; @@ -183,7 +184,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); make_dentry_ptr_block(dir, &d, dentry_blk); - return f2fs_find_target_dentry(&d, fname, max_slots); + return f2fs_find_target_dentry(&d, fname, max_slots, use_hash); } static inline int f2fs_match_name(const struct inode *dir, @@ -208,7 +209,8 @@ static inline int f2fs_match_name(const struct inode *dir, } struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, - const struct f2fs_filename *fname, int *max_slots) + const struct f2fs_filename *fname, int *max_slots, + bool use_hash) { struct f2fs_dir_entry *de; unsigned long bit_pos = 0; @@ -231,7 +233,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, continue; } - if (de->hash_code == fname->hash) { + if (!use_hash || de->hash_code == fname->hash) { res = f2fs_match_name(d->inode, fname, d->filename[bit_pos], le16_to_cpu(de->name_len)); @@ -258,11 +260,12 @@ found: static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, const struct f2fs_filename *fname, - struct page **res_page) + 
struct page **res_page, + bool use_hash) { int s = GET_DENTRY_SLOTS(fname->disk_name.len); unsigned int nbucket, nblock; - unsigned int bidx, end_block; + unsigned int bidx, end_block, bucket_no; struct page *dentry_page; struct f2fs_dir_entry *de = NULL; pgoff_t next_pgofs; @@ -272,8 +275,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nblock = bucket_blocks(level); + bucket_no = use_hash ? le32_to_cpu(fname->hash) % nbucket : 0; + +start_find_bucket: bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - le32_to_cpu(fname->hash) % nbucket); + bucket_no); end_block = bidx + nblock; while (bidx < end_block) { @@ -290,7 +296,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } } - de = find_in_block(dir, dentry_page, fname, &max_slots); + de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash); if (IS_ERR(de)) { *res_page = ERR_CAST(de); de = NULL; @@ -307,12 +313,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, bidx++; } - if (!de && room && F2FS_I(dir)->chash != fname->hash) { - F2FS_I(dir)->chash = fname->hash; - F2FS_I(dir)->clevel = level; - } + if (de) + return de; - return de; + if (likely(use_hash)) { + if (room && F2FS_I(dir)->chash != fname->hash) { + F2FS_I(dir)->chash = fname->hash; + F2FS_I(dir)->clevel = level; + } + } else if (++bucket_no < nbucket) { + goto start_find_bucket; + } + return NULL; } struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, @@ -323,11 +335,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, struct f2fs_dir_entry *de = NULL; unsigned int max_depth; unsigned int level; + bool use_hash = true; *res_page = NULL; +#if IS_ENABLED(CONFIG_UNICODE) +start_find_entry: +#endif if (f2fs_has_inline_dentry(dir)) { - de = f2fs_find_in_inline_dir(dir, fname, res_page); + de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash); goto out; } @@ -343,11 +359,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, } for (level = 0; level < max_depth; level++) { - de = find_in_level(dir, level, fname, res_page); + de = find_in_level(dir, level, fname, res_page, use_hash); if (de || IS_ERR(*res_page)) break; } + out: +#if IS_ENABLED(CONFIG_UNICODE) + if (IS_CASEFOLDED(dir) && !de && use_hash) { + use_hash = false; + goto start_find_entry; + } +#endif /* This is to increase the speed of f2fs_create */ if (!de) F2FS_I(dir)->task = current; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 6f2cbf4c5740..1afa7be16e7d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -24,7 +24,6 @@ #include <linux/quotaops.h> #include <linux/part_stat.h> #include <linux/rw_hint.h> -#include <crypto/hash.h> #include <linux/fscrypt.h> #include <linux/fsverity.h> @@ -1768,9 +1767,6 @@ struct f2fs_sb_info { u64 sectors_written_start; u64 kbytes_written; - /* Reference to checksum algorithm driver via cryptoapi */ - struct crypto_shash *s_chksum_driver; - /* Precomputed FS UUID checksum for seeding other checksums */ __u32 s_chksum_seed; @@ -1948,21 +1944,7 @@ static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi, static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc, const void *address, unsigned int length) { - struct { - struct shash_desc shash; - char ctx[4]; - } desc; - int err; - - BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx)); - - desc.shash.tfm = sbi->s_chksum_driver; - *(u32 *)desc.ctx = crc; - - err = crypto_shash_update(&desc.shash, address, length); - BUG_ON(err); - - 
return *(u32 *)desc.ctx; + return crc32(crc, address, length); } static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address, @@ -2003,9 +1985,14 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) return F2FS_I_SB(mapping->host); } +static inline struct f2fs_sb_info *F2FS_F_SB(struct folio *folio) +{ + return F2FS_M_SB(folio->mapping); +} + static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) { - return F2FS_M_SB(page_file_mapping(page)); + return F2FS_F_SB(page_folio(page)); } static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) @@ -3583,7 +3570,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, struct f2fs_filename *fname); void f2fs_free_filename(struct f2fs_filename *fname); struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, - const struct f2fs_filename *fname, int *max_slots); + const struct f2fs_filename *fname, int *max_slots, + bool use_hash); int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, @@ -3718,7 +3706,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino); int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi); int f2fs_flush_device_cache(struct f2fs_sb_info *sbi); void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free); -void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); +void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr, + unsigned int len); bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); int f2fs_start_discard_thread(struct f2fs_sb_info *sbi); void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi); @@ -4219,7 +4208,8 @@ int f2fs_write_inline_data(struct inode *inode, struct folio *folio); int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page); + struct page **res_page, + bool use_hash); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, @@ -4386,7 +4376,8 @@ void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi); int __init f2fs_init_compress_cache(void); void f2fs_destroy_compress_cache(void); struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi); -void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr); +void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, + block_t blkaddr, unsigned int len); void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, block_t blkaddr); bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page, @@ -4441,8 +4432,8 @@ static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } static inline int __init f2fs_init_compress_cache(void) { return 0; } static inline void f2fs_destroy_compress_cache(void) { } -static inline void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, - block_t blkaddr) { } +static inline void f2fs_invalidate_compress_pages_range(struct f2fs_sb_info *sbi, + block_t blkaddr, unsigned int len) { } static inline void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page, nid_t ino, 
block_t blkaddr) { } static inline bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, @@ -4758,10 +4749,10 @@ static inline void f2fs_truncate_meta_inode_pages(struct f2fs_sb_info *sbi, } static inline void f2fs_invalidate_internal_cache(struct f2fs_sb_info *sbi, - block_t blkaddr) + block_t blkaddr, unsigned int len) { - f2fs_truncate_meta_inode_pages(sbi, blkaddr, 1); - f2fs_invalidate_compress_page(sbi, blkaddr); + f2fs_truncate_meta_inode_pages(sbi, blkaddr, len); + f2fs_invalidate_compress_pages_range(sbi, blkaddr, len); } #define EFSBADCRC EBADMSG /* Bad CRC detected */ diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index aa9679b3d8e4..f92a9fba9991 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -621,8 +621,11 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) int cluster_index = 0, valid_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks); + block_t blkstart; + int blklen = 0; addr = get_dnode_addr(dn->inode, dn->node_page) + ofs; + blkstart = le32_to_cpu(*addr); /* Assumption: truncation starts with cluster */ for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) { @@ -638,26 +641,44 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) } if (blkaddr == NULL_ADDR) - continue; + goto next; f2fs_set_data_blkaddr(dn, NULL_ADDR); if (__is_valid_data_blkaddr(blkaddr)) { if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE)) - continue; + goto next; if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr, DATA_GENERIC_ENHANCE)) - continue; + goto next; if (compressed_cluster) valid_blocks++; } - f2fs_invalidate_blocks(sbi, blkaddr); + if (blkstart + blklen == blkaddr) { + blklen++; + } else { + f2fs_invalidate_blocks(sbi, blkstart, blklen); + blkstart = blkaddr; + blklen = 1; + } if (!released || blkaddr != COMPRESS_ADDR) nr_free++; + + continue; + +next: + if (blklen) + f2fs_invalidate_blocks(sbi, blkstart, blklen); + + blkstart = le32_to_cpu(*(addr + 1)); + blklen = 0; } + if (blklen) + f2fs_invalidate_blocks(sbi, blkstart, blklen); + if (compressed_cluster) f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false); @@ -747,10 +768,8 @@ int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) if (IS_DEVICE_ALIASING(inode)) { struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ]; struct extent_info ei = et->largest; - unsigned int i; - for (i = 0; i < ei.len; i++) - f2fs_invalidate_blocks(sbi, ei.blk + i); + f2fs_invalidate_blocks(sbi, ei.blk, ei.len); dec_valid_block_count(sbi, inode, ei.len); f2fs_update_time(sbi, REQ_TIME); @@ -1323,7 +1342,7 @@ static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA); if (ret) { dec_valid_block_count(sbi, inode, 1); - f2fs_invalidate_blocks(sbi, *blkaddr); + f2fs_invalidate_blocks(sbi, *blkaddr, 1); } else { f2fs_update_data_blkaddr(&dn, *blkaddr); } @@ -1575,7 +1594,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, break; } - f2fs_invalidate_blocks(sbi, dn->data_blkaddr); + f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1); f2fs_set_data_blkaddr(dn, NEW_ADDR); } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 3e1b6d2ff3a7..faf9fa1c804d 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -806,11 +806,14 @@ retry: goto out; } - if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) + if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) { ret = -EBUSY; - else - p.min_segno = *result; - goto out; 
+ goto out; + } + if (gc_type == FG_GC) + clear_bit(GET_SEC_FROM_SEG(sbi, *result), dirty_i->victim_secmap); + p.min_segno = *result; + goto got_result; } ret = -ENODATA; @@ -1412,7 +1415,7 @@ static int move_data_block(struct inode *inode, block_t bidx, page_address(mpage), PAGE_SIZE); f2fs_put_page(mpage, 1); - f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr); + f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr, 1); set_page_dirty(fio.encrypted_page); if (clear_page_dirty_for_io(fio.encrypted_page)) diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 005babf1bed1..3e3c35d4c98b 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -81,7 +81,7 @@ bool f2fs_may_inline_dentry(struct inode *inode) void f2fs_do_read_inline_data(struct folio *folio, struct page *ipage) { - struct inode *inode = folio_file_mapping(folio)->host; + struct inode *inode = folio->mapping->host; if (folio_test_uptodate(folio)) return; @@ -352,7 +352,8 @@ process_inline: struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page) + struct page **res_page, + bool use_hash) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); struct f2fs_dir_entry *de; @@ -369,7 +370,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, inline_dentry = inline_data_addr(dir, ipage); make_dentry_ptr_inline(dir, &d, inline_dentry); - de = f2fs_find_target_dentry(&d, fname, NULL); + de = f2fs_find_target_dentry(&d, fname, NULL, use_hash); unlock_page(ipage); if (IS_ERR(de)) { *res_page = ERR_CAST(de); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 282fd320bdb3..3dd25f64d6f1 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -302,15 +302,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) F2FS_TOTAL_EXTRA_ATTR_SIZE); return false; } - if (f2fs_sb_has_flexible_inline_xattr(sbi) && - f2fs_has_inline_xattr(inode) && - (!fi->i_inline_xattr_size || - fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { - f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu", - __func__, inode->i_ino, fi->i_inline_xattr_size, - MAX_INLINE_XATTR_SIZE); - return false; - } if (f2fs_sb_has_compression(sbi) && fi->i_flags & F2FS_COMPR_FL && F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, @@ -320,6 +311,16 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page) } } + if (f2fs_sb_has_flexible_inline_xattr(sbi) && + f2fs_has_inline_xattr(inode) && + (fi->i_inline_xattr_size < MIN_INLINE_XATTR_SIZE || + fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { + f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, min: %zu, max: %lu", + __func__, inode->i_ino, fi->i_inline_xattr_size, + MIN_INLINE_XATTR_SIZE, MAX_INLINE_XATTR_SIZE); + return false; + } + if (!f2fs_sb_has_extra_attr(sbi)) { if (f2fs_sb_has_project_quota(sbi)) { f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.", diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 57d46e1439de..a278c7da8177 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -341,6 +341,7 @@ fail_drop: trace_f2fs_new_inode(inode, err); dquot_drop(inode); inode->i_flags |= S_NOQUOTA; + make_bad_inode(inode); if (nid_free) set_inode_flag(inode, FI_FREE_NID); clear_nlink(inode); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 0b900a7a48e5..f88392fc4ba9 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -558,6 +558,7 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, block_t blkaddr; int i; + 
ni->flag = 0; ni->nid = nid; retry: /* Check nat cache */ @@ -916,7 +917,7 @@ static int truncate_node(struct dnode_of_data *dn) } /* Deallocate node address */ - f2fs_invalidate_blocks(sbi, ni.blk_addr); + f2fs_invalidate_blocks(sbi, ni.blk_addr, 1); dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino); set_node_addr(sbi, &ni, NULL_ADDR, false); @@ -1274,8 +1275,9 @@ int f2fs_remove_inode_page(struct inode *inode) } /* remove potential inline_data blocks */ - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) + if (!IS_DEVICE_ALIASING(inode) && + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode))) f2fs_truncate_data_blocks_range(&dn, 1); /* 0 is possible, after f2fs_new_inode() has failed */ @@ -2763,7 +2765,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page) if (err) return err; - f2fs_invalidate_blocks(sbi, ni.blk_addr); + f2fs_invalidate_blocks(sbi, ni.blk_addr, 1); dec_valid_node_count(sbi, inode, false); set_node_addr(sbi, &ni, NULL_ADDR, false); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index f35be2c48e3c..69a2027e3ebc 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -899,10 +899,8 @@ skip: * and the f2fs is not read only, check and fix zoned block devices' * write pointer consistency. */ - if (!err) { + if (!err) err = f2fs_check_and_fix_write_pointer(sbi); - ret = err; - } if (!err) clear_sbi_flag(sbi, SBI_POR_DOING); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index eade36c5ef13..c282e8a0a2ec 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -201,6 +201,12 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean) clear_inode_flag(inode, FI_ATOMIC_FILE); if (is_inode_flag_set(inode, FI_ATOMIC_DIRTIED)) { clear_inode_flag(inode, FI_ATOMIC_DIRTIED); + /* + * The vfs inode keeps clean during commit, but the f2fs inode + * doesn't. So clear the dirty state after commit and let + * f2fs_mark_inode_dirty_sync ensure a consistent dirty state. + */ + f2fs_inode_synced(inode); f2fs_mark_inode_dirty_sync(inode, true); } stat_dec_atomic_inode(inode); @@ -245,7 +251,7 @@ retry: if (!__is_valid_data_blkaddr(new_addr)) { if (new_addr == NULL_ADDR) dec_valid_block_count(sbi, inode, 1); - f2fs_invalidate_blocks(sbi, dn.data_blkaddr); + f2fs_invalidate_blocks(sbi, dn.data_blkaddr, 1); f2fs_update_data_blkaddr(&dn, new_addr); } else { f2fs_replace_block(sbi, &dn, dn.data_blkaddr, @@ -2426,78 +2432,38 @@ static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr, SIT_I(sbi)->max_mtime = ctime; } -static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) +/* + * NOTE: when updating multiple blocks at the same time, please ensure + * that the consecutive input blocks belong to the same segment. 
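A rough standalone sketch, not part of the patch: callers such as f2fs_truncate_data_blocks_range() above honour the same-segment note by accumulating consecutive block addresses into one (start, len) run and handing each run to f2fs_invalidate_blocks() in a single call instead of once per block. All names below (block_t, invalidate_run, truncate_blocks) are illustrative stand-ins.

#include <stdio.h>

typedef unsigned int block_t;

/* stand-in for f2fs_invalidate_blocks(sbi, start, len) */
static void invalidate_run(block_t start, unsigned int len)
{
	if (len)
		printf("invalidate %u block(s) starting at %u\n", len, start);
}

static void truncate_blocks(const block_t *addrs, unsigned int count)
{
	block_t start = 0;
	unsigned int len = 0;

	for (unsigned int i = 0; i < count; i++) {
		if (len && addrs[i] == start + len) {
			len++;				/* extends the current run */
		} else {
			invalidate_run(start, len);	/* flush the previous run */
			start = addrs[i];
			len = 1;
		}
	}
	invalidate_run(start, len);			/* flush the tail run */
}

int main(void)
{
	block_t addrs[] = { 100, 101, 102, 200, 201, 300 };

	truncate_blocks(addrs, 6);	/* three runs: 100+3, 200+2, 300+1 */
	return 0;
}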
+ */ +static int update_sit_entry_for_release(struct f2fs_sb_info *sbi, struct seg_entry *se, + block_t blkaddr, unsigned int offset, int del) { - struct seg_entry *se; - unsigned int segno, offset; - long int new_vblocks; bool exist; #ifdef CONFIG_F2FS_CHECK_FS bool mir_exist; #endif + int i; + int del_count = -del; - segno = GET_SEGNO(sbi, blkaddr); - if (segno == NULL_SEGNO) - return; - - se = get_seg_entry(sbi, segno); - new_vblocks = se->valid_blocks + del; - offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); - - f2fs_bug_on(sbi, (new_vblocks < 0 || - (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); - - se->valid_blocks = new_vblocks; - - /* Update valid block bitmap */ - if (del > 0) { - exist = f2fs_test_and_set_bit(offset, se->cur_valid_map); -#ifdef CONFIG_F2FS_CHECK_FS - mir_exist = f2fs_test_and_set_bit(offset, - se->cur_valid_map_mir); - if (unlikely(exist != mir_exist)) { - f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", - blkaddr, exist); - f2fs_bug_on(sbi, 1); - } -#endif - if (unlikely(exist)) { - f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", - blkaddr); - f2fs_bug_on(sbi, 1); - se->valid_blocks--; - del = 0; - } - - if (f2fs_block_unit_discard(sbi) && - !f2fs_test_and_set_bit(offset, se->discard_map)) - sbi->discard_blks--; + f2fs_bug_on(sbi, GET_SEGNO(sbi, blkaddr) != GET_SEGNO(sbi, blkaddr + del_count - 1)); - /* - * SSR should never reuse block which is checkpointed - * or newly invalidated. - */ - if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { - if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) - se->ckpt_valid_blocks++; - } - } else { - exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map); + for (i = 0; i < del_count; i++) { + exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map); #ifdef CONFIG_F2FS_CHECK_FS - mir_exist = f2fs_test_and_clear_bit(offset, + mir_exist = f2fs_test_and_clear_bit(offset + i, se->cur_valid_map_mir); if (unlikely(exist != mir_exist)) { f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d", - blkaddr, exist); + blkaddr + i, exist); f2fs_bug_on(sbi, 1); } #endif if (unlikely(!exist)) { - f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", - blkaddr); + f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u", blkaddr + i); f2fs_bug_on(sbi, 1); se->valid_blocks++; - del = 0; + del += 1; } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { /* * If checkpoints are off, we must not reuse data that @@ -2505,7 +2471,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) * before, we must track that to know how much space we * really have. 
*/ - if (f2fs_test_bit(offset, se->ckpt_valid_map)) { + if (f2fs_test_bit(offset + i, se->ckpt_valid_map)) { spin_lock(&sbi->stat_lock); sbi->unusable_block_count++; spin_unlock(&sbi->stat_lock); @@ -2513,12 +2479,91 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) } if (f2fs_block_unit_discard(sbi) && - f2fs_test_and_clear_bit(offset, se->discard_map)) + f2fs_test_and_clear_bit(offset + i, se->discard_map)) sbi->discard_blks++; + + if (!f2fs_test_bit(offset + i, se->ckpt_valid_map)) + se->ckpt_valid_blocks -= 1; + } + + return del; +} + +static int update_sit_entry_for_alloc(struct f2fs_sb_info *sbi, struct seg_entry *se, + block_t blkaddr, unsigned int offset, int del) +{ + bool exist; +#ifdef CONFIG_F2FS_CHECK_FS + bool mir_exist; +#endif + + exist = f2fs_test_and_set_bit(offset, se->cur_valid_map); +#ifdef CONFIG_F2FS_CHECK_FS + mir_exist = f2fs_test_and_set_bit(offset, + se->cur_valid_map_mir); + if (unlikely(exist != mir_exist)) { + f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d", + blkaddr, exist); + f2fs_bug_on(sbi, 1); } +#endif + if (unlikely(exist)) { + f2fs_err(sbi, "Bitmap was wrongly set, blk:%u", blkaddr); + f2fs_bug_on(sbi, 1); + se->valid_blocks--; + del = 0; + } + + if (f2fs_block_unit_discard(sbi) && + !f2fs_test_and_set_bit(offset, se->discard_map)) + sbi->discard_blks--; + + /* + * SSR should never reuse block which is checkpointed + * or newly invalidated. + */ + if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { + if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) + se->ckpt_valid_blocks++; + } + if (!f2fs_test_bit(offset, se->ckpt_valid_map)) se->ckpt_valid_blocks += del; + return del; +} + +/* + * If releasing blocks, this function supports updating multiple consecutive blocks + * at one time, but please note that these consecutive blocks need to belong to the + * same segment. 
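The range variant of f2fs_invalidate_blocks() shown just below walks the [addr, addr+len) range one segment at a time so that each update_sit_entry() call honours the same-segment rule stated above. A minimal standalone sketch of that splitting arithmetic; BLOCKS_PER_SEG and the plain divisions stand in for sbi->blocks_per_seg and the GET_SEGNO()/GET_BLKOFF_FROM_SEG0() macros.

#include <stdio.h>

#define BLOCKS_PER_SEG 512u

static void invalidate_range(unsigned int addr, unsigned int len)
{
	unsigned int end, segno, last_seg, cnt;

	if (!len)
		return;

	end = addr + len - 1;
	segno = addr / BLOCKS_PER_SEG;
	last_seg = end / BLOCKS_PER_SEG;

	/* first chunk: up to the end of addr's segment, or the whole range */
	if (segno == last_seg)
		cnt = len;
	else
		cnt = BLOCKS_PER_SEG - (addr % BLOCKS_PER_SEG);

	while (1) {
		printf("segment %u: release %u block(s) starting at block %u\n",
		       segno, cnt, addr);
		if (segno == last_seg)
			break;
		segno++;
		addr = segno * BLOCKS_PER_SEG;
		cnt = (segno == last_seg) ? (end % BLOCKS_PER_SEG) + 1
					  : BLOCKS_PER_SEG;
	}
}

int main(void)
{
	invalidate_range(510, 6);	/* spans a segment boundary: 2 + 4 blocks */
	return 0;
}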
+ */ +static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) +{ + struct seg_entry *se; + unsigned int segno, offset; + long int new_vblocks; + + segno = GET_SEGNO(sbi, blkaddr); + if (segno == NULL_SEGNO) + return; + + se = get_seg_entry(sbi, segno); + new_vblocks = se->valid_blocks + del; + offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); + + f2fs_bug_on(sbi, (new_vblocks < 0 || + (new_vblocks > f2fs_usable_blks_in_seg(sbi, segno)))); + + se->valid_blocks = new_vblocks; + + /* Update valid block bitmap */ + if (del > 0) { + del = update_sit_entry_for_alloc(sbi, se, blkaddr, offset, del); + } else { + del = update_sit_entry_for_release(sbi, se, blkaddr, offset, del); + } + __mark_sit_entry_dirty(sbi, segno); /* update total number of valid blocks to be written in ckpt area */ @@ -2528,25 +2573,43 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) get_sec_entry(sbi, segno)->valid_blocks += del; } -void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) +void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr, + unsigned int len) { unsigned int segno = GET_SEGNO(sbi, addr); struct sit_info *sit_i = SIT_I(sbi); + block_t addr_start = addr, addr_end = addr + len - 1; + unsigned int seg_num = GET_SEGNO(sbi, addr_end) - segno + 1; + unsigned int i = 1, max_blocks = sbi->blocks_per_seg, cnt; f2fs_bug_on(sbi, addr == NULL_ADDR); if (addr == NEW_ADDR || addr == COMPRESS_ADDR) return; - f2fs_invalidate_internal_cache(sbi, addr); + f2fs_invalidate_internal_cache(sbi, addr, len); /* add it into sit main buffer */ down_write(&sit_i->sentry_lock); - update_segment_mtime(sbi, addr, 0); - update_sit_entry(sbi, addr, -1); + if (seg_num == 1) + cnt = len; + else + cnt = max_blocks - GET_BLKOFF_FROM_SEG0(sbi, addr); + + do { + update_segment_mtime(sbi, addr_start, 0); + update_sit_entry(sbi, addr_start, -cnt); - /* add it into dirty seglist */ - locate_dirty_segment(sbi, segno); + /* add it into dirty seglist */ + locate_dirty_segment(sbi, segno); + + /* update @addr_start and @cnt and @segno */ + addr_start = START_BLOCK(sbi, ++segno); + if (++i == seg_num) + cnt = GET_BLKOFF_FROM_SEG0(sbi, addr_end) + 1; + else + cnt = max_blocks; + } while (i <= seg_num); up_write(&sit_i->sentry_lock); } @@ -3857,7 +3920,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) goto out; } if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) - f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr); + f2fs_invalidate_internal_cache(fio->sbi, fio->old_blkaddr, 1); /* writeout dirty page into bdev */ f2fs_submit_page_write(fio); @@ -4049,7 +4112,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, update_sit_entry(sbi, new_blkaddr, 1); } if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) { - f2fs_invalidate_internal_cache(sbi, old_blkaddr); + f2fs_invalidate_internal_cache(sbi, old_blkaddr, 1); if (!from_gc) update_segment_mtime(sbi, old_blkaddr, 0); update_sit_entry(sbi, old_blkaddr, -1); @@ -5405,7 +5468,8 @@ int f2fs_check_and_fix_write_pointer(struct f2fs_sb_info *sbi) { int ret; - if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb)) + if (!f2fs_sb_has_blkzoned(sbi) || f2fs_readonly(sbi->sb) || + f2fs_hw_is_readonly(sbi)) return 0; f2fs_notice(sbi, "Checking entire write pointers"); @@ -5492,8 +5556,10 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi, secno = GET_SEC_FROM_SEG(sbi, segno); start = GET_SEG_FROM_SEC(sbi, secno); - if (!__is_large_section(sbi)) - 
return get_seg_entry(sbi, start + i)->mtime; + if (!__is_large_section(sbi)) { + mtime = get_seg_entry(sbi, start + i)->mtime; + goto out; + } for (i = 0; i < usable_segs_per_sec; i++) { /* for large section, only check the mtime of valid segments */ @@ -5506,7 +5572,11 @@ unsigned long long f2fs_get_section_mtime(struct f2fs_sb_info *sbi, if (total_valid_blocks == 0) return INVALID_MTIME; - return div_u64(mtime, total_valid_blocks); + mtime = div_u64(mtime, total_valid_blocks); +out: + if (unlikely(mtime == INVALID_MTIME)) + mtime -= 1; + return mtime; } /* diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index fc7d463dee15..19b67828ae32 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1694,8 +1694,6 @@ static void f2fs_put_super(struct super_block *sb) kvfree(sbi->ckpt); - if (sbi->s_chksum_driver) - crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->raw_super); f2fs_destroy_page_array_cache(sbi); @@ -4466,15 +4464,6 @@ try_onemore: } mutex_init(&sbi->flush_lock); - /* Load the checksum driver */ - sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); - if (IS_ERR(sbi->s_chksum_driver)) { - f2fs_err(sbi, "Cannot load crc32 driver."); - err = PTR_ERR(sbi->s_chksum_driver); - sbi->s_chksum_driver = NULL; - goto free_sbi; - } - /* set a block size */ if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { f2fs_err(sbi, "unable to set blocksize"); @@ -4919,8 +4908,6 @@ free_options: free_sb_buf: kfree(raw_super); free_sbi: - if (sbi->s_chksum_driver) - crypto_free_shash(sbi->s_chksum_driver); kfree(sbi); sb->s_fs_info = NULL; @@ -5127,5 +5114,3 @@ module_exit(exit_f2fs_fs) MODULE_AUTHOR("Samsung Electronics's Praesto Team"); MODULE_DESCRIPTION("Flash Friendly File System"); MODULE_LICENSE("GPL"); -MODULE_SOFTDEP("pre: crc32"); - diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 6b99dc49f776..d15c68b28952 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -1472,7 +1472,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq, le32_to_cpu(sbi->raw_super->segment_count_main); int i, j; - seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n" + seq_puts(seq, "format: segment_type|valid_blocks|bitmaps|mtime\n" "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n"); for (i = 0; i < total_segs; i++) { @@ -1482,6 +1482,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq, seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks); for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++) seq_printf(seq, " %.2x", se->cur_valid_map[j]); + seq_printf(seq, "| %llx", se->mtime); seq_putc(seq, '\n'); } return 0; diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 15bf32c21ac0..926c26e90ef8 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -43,17 +43,13 @@ static inline void vfat_d_version_set(struct dentry *dentry, * If it happened, the negative dentry isn't actually negative * anymore. So, drop it. 
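A standalone illustration, not kernel code, of the versioning scheme the comment above relies on: the parent directory's change counter is stamped into the negative dentry (vfat_d_version) and revalidation simply compares it against the directory's current iversion, which is why the new d_revalidate signature, receiving the parent inode directly, lets vfat_revalidate_shortname() drop the d_lock/d_parent dance. Structure and function names here are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct dir_state  { unsigned long iversion; };     /* parent directory */
struct neg_dentry { unsigned long seen_version; }; /* cached "no such file" */

static void dir_changed(struct dir_state *d)
{
	d->iversion++;	/* bumped on create/rename/unlink in the directory */
}

static bool neg_dentry_still_valid(const struct dir_state *d,
				   const struct neg_dentry *n)
{
	return n->seen_version == d->iversion;
}

int main(void)
{
	struct dir_state dir = { .iversion = 7 };
	struct neg_dentry miss = { .seen_version = dir.iversion };

	printf("%d\n", neg_dentry_still_valid(&dir, &miss)); /* 1: still negative */
	dir_changed(&dir);	/* e.g. the name was just created */
	printf("%d\n", neg_dentry_still_valid(&dir, &miss)); /* 0: drop the dentry */
	return 0;
}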
*/ -static int vfat_revalidate_shortname(struct dentry *dentry) +static bool vfat_revalidate_shortname(struct dentry *dentry, struct inode *dir) { - int ret = 1; - spin_lock(&dentry->d_lock); - if (!inode_eq_iversion(d_inode(dentry->d_parent), vfat_d_version(dentry))) - ret = 0; - spin_unlock(&dentry->d_lock); - return ret; + return inode_eq_iversion(dir, vfat_d_version(dentry)); } -static int vfat_revalidate(struct dentry *dentry, unsigned int flags) +static int vfat_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; @@ -61,10 +57,11 @@ static int vfat_revalidate(struct dentry *dentry, unsigned int flags) /* This is not negative dentry. Always valid. */ if (d_really_is_positive(dentry)) return 1; - return vfat_revalidate_shortname(dentry); + return vfat_revalidate_shortname(dentry, dir); } -static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) +static int vfat_revalidate_ci(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; @@ -97,7 +94,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags) if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET)) return 0; - return vfat_revalidate_shortname(dentry); + return vfat_revalidate_shortname(dentry, dir); } /* returns the length of a struct qstr, ignoring trailing dots */ diff --git a/fs/fcntl.c b/fs/fcntl.c index 49884fa3c81d..5598e4d57422 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -1158,10 +1158,10 @@ static int __init fcntl_init(void) * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. */ - BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | - __FMODE_EXEC | __FMODE_NONOTIFY)); + __FMODE_EXEC)); fasync_cache = kmem_cache_create("fasync_cache", sizeof(struct fasync_struct), 0, diff --git a/fs/fhandle.c b/fs/fhandle.c index ec9145047dfc..3e092ae6d142 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -187,17 +187,6 @@ static int get_path_from_fd(int fd, struct path *root) return 0; } -enum handle_to_path_flags { - HANDLE_CHECK_PERMS = (1 << 0), - HANDLE_CHECK_SUBTREE = (1 << 1), -}; - -struct handle_to_path_ctx { - struct path root; - enum handle_to_path_flags flags; - unsigned int fh_flags; -}; - static int vfs_dentry_acceptable(void *context, struct dentry *dentry) { struct handle_to_path_ctx *ctx = context; @@ -261,50 +250,55 @@ static int do_handle_to_path(struct file_handle *handle, struct path *path, { int handle_dwords; struct vfsmount *mnt = ctx->root.mnt; + struct dentry *dentry; /* change the handle size to multiple of sizeof(u32) */ handle_dwords = handle->handle_bytes >> 2; - path->dentry = exportfs_decode_fh_raw(mnt, - (struct fid *)handle->f_handle, - handle_dwords, handle->handle_type, - ctx->fh_flags, - vfs_dentry_acceptable, ctx); - if (IS_ERR_OR_NULL(path->dentry)) { - if (path->dentry == ERR_PTR(-ENOMEM)) + dentry = exportfs_decode_fh_raw(mnt, (struct fid *)handle->f_handle, + handle_dwords, handle->handle_type, + ctx->fh_flags, vfs_dentry_acceptable, + ctx); + if (IS_ERR_OR_NULL(dentry)) { + if (dentry == ERR_PTR(-ENOMEM)) return -ENOMEM; return -ESTALE; } + path->dentry = dentry; path->mnt = mntget(mnt); return 0; } -/* - * Allow relaxed permissions of file handles if the caller has the - * ability to mount the filesystem or create a 
bind-mount of the - * provided @mountdirfd. - * - * In both cases the caller may be able to get an unobstructed way to - * the encoded file handle. If the caller is only able to create a - * bind-mount we need to verify that there are no locked mounts on top - * of it that could prevent us from getting to the encoded file. - * - * In principle, locked mounts can prevent the caller from mounting the - * filesystem but that only applies to procfs and sysfs neither of which - * support decoding file handles. - */ -static inline bool may_decode_fh(struct handle_to_path_ctx *ctx, - unsigned int o_flags) +static inline int may_decode_fh(struct handle_to_path_ctx *ctx, + unsigned int o_flags) { struct path *root = &ctx->root; + if (capable(CAP_DAC_READ_SEARCH)) + return 0; + /* - * Restrict to O_DIRECTORY to provide a deterministic API that avoids a - * confusing api in the face of disconnected non-dir dentries. + * Allow relaxed permissions of file handles if the caller has + * the ability to mount the filesystem or create a bind-mount of + * the provided @mountdirfd. + * + * In both cases the caller may be able to get an unobstructed + * way to the encoded file handle. If the caller is only able to + * create a bind-mount we need to verify that there are no + * locked mounts on top of it that could prevent us from getting + * to the encoded file. + * + * In principle, locked mounts can prevent the caller from + * mounting the filesystem but that only applies to procfs and + * sysfs neither of which support decoding file handles. + * + * Restrict to O_DIRECTORY to provide a deterministic API that + * avoids a confusing api in the face of disconnected non-dir + * dentries. * * There's only one dentry for each directory inode (VFS rule)... */ if (!(o_flags & O_DIRECTORY)) - return false; + return -EPERM; if (ns_capable(root->mnt->mnt_sb->s_user_ns, CAP_SYS_ADMIN)) ctx->flags = HANDLE_CHECK_PERMS; @@ -314,14 +308,14 @@ static inline bool may_decode_fh(struct handle_to_path_ctx *ctx, !has_locked_children(real_mount(root->mnt), root->dentry)) ctx->flags = HANDLE_CHECK_PERMS | HANDLE_CHECK_SUBTREE; else - return false; + return -EPERM; /* Are we able to override DAC permissions? 
*/ if (!ns_capable(current_user_ns(), CAP_DAC_READ_SEARCH)) - return false; + return -EPERM; ctx->fh_flags = EXPORT_FH_DIR_ONLY; - return true; + return 0; } static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, @@ -331,15 +325,19 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, struct file_handle f_handle; struct file_handle *handle = NULL; struct handle_to_path_ctx ctx = {}; + const struct export_operations *eops; retval = get_path_from_fd(mountdirfd, &ctx.root); if (retval) goto out_err; - if (!capable(CAP_DAC_READ_SEARCH) && !may_decode_fh(&ctx, o_flags)) { - retval = -EPERM; + eops = ctx.root.mnt->mnt_sb->s_export_op; + if (eops && eops->permission) + retval = eops->permission(&ctx, o_flags); + else + retval = may_decode_fh(&ctx, o_flags); + if (retval) goto out_path; - } if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle))) { retval = -EFAULT; @@ -398,29 +396,28 @@ static long do_handle_open(int mountdirfd, struct file_handle __user *ufh, int open_flag) { long retval = 0; - struct path path; + struct path path __free(path_put) = {}; struct file *file; - int fd; + const struct export_operations *eops; retval = handle_to_path(mountdirfd, ufh, &path, open_flag); if (retval) return retval; - fd = get_unused_fd_flags(open_flag); - if (fd < 0) { - path_put(&path); + CLASS(get_unused_fd, fd)(O_CLOEXEC); + if (fd < 0) return fd; - } - file = file_open_root(&path, "", open_flag, 0); - if (IS_ERR(file)) { - put_unused_fd(fd); - retval = PTR_ERR(file); - } else { - retval = fd; - fd_install(fd, file); - } - path_put(&path); - return retval; + + eops = path.mnt->mnt_sb->s_export_op; + if (eops->open) + file = eops->open(&path, open_flag); + else + file = file_open_root(&path, "", open_flag, 0); + if (IS_ERR(file)) + return PTR_ERR(file); + + fd_install(fd, file); + return take_fd(fd); } /** diff --git a/fs/file.c b/fs/file.c index 25c6e53b03f8..d868cdb95d1e 100644 --- a/fs/file.c +++ b/fs/file.c @@ -279,10 +279,6 @@ repeat: if (nr < fdt->max_fds) return 0; - /* Can we expand? */ - if (nr >= sysctl_nr_open) - return -EMFILE; - if (unlikely(files->resize_in_progress)) { spin_unlock(&files->file_lock); wait_event(files->resize_wait, !files->resize_in_progress); @@ -290,6 +286,10 @@ repeat: goto repeat; } + /* Can we expand? */ + if (unlikely(nr >= sysctl_nr_open)) + return -EMFILE; + /* All good, so we try */ files->resize_in_progress = true; error = expand_fdtable(files, nr); @@ -1231,17 +1231,9 @@ __releases(&files->file_lock) /* * We need to detect attempts to do dup2() over allocated but still - * not finished descriptor. NB: OpenBSD avoids that at the price of - * extra work in their equivalent of fget() - they insert struct - * file immediately after grabbing descriptor, mark it larval if - * more work (e.g. actual opening) is needed and make sure that - * fget() treats larval files as absent. Potentially interesting, - * but while extra work in fget() is trivial, locking implications - * and amount of surgery on open()-related paths in VFS are not. - * FreeBSD fails with -EBADF in the same situation, NetBSD "solution" - * deadlocks in rather amusing ways, AFAICS. All of that is out of - * scope of POSIX or SUS, since neither considers shared descriptor - * tables and this condition does not arise without those. + * not finished descriptor. + * + * POSIX is silent on the issue, we return -EBUSY. 
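The rewritten do_handle_open() above leans on the kernel's scope-based cleanup helpers (__free(path_put), CLASS(get_unused_fd), take_fd()), which are built on the compiler's cleanup attribute. A rough standalone sketch of that underlying mechanism, assuming GCC or Clang; free_buf and buf are illustrative names, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* called automatically when the annotated variable goes out of scope */
static void free_buf(char **p)
{
	free(*p);		/* free(NULL) is harmless on the error path */
	printf("freed\n");
}

int main(void)
{
	char *buf __attribute__((cleanup(free_buf))) = malloc(32);

	if (!buf)
		return 1;	/* cleanup still runs, no leak and no goto */

	snprintf(buf, 32, "scope-based cleanup");
	printf("%s\n", buf);
	return 0;		/* no explicit free() on any return path */
}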
*/ fdt = files_fdtable(files); fd = array_index_nospec(fd, fdt->max_fds); diff --git a/fs/file_table.c b/fs/file_table.c index 976736be47cb..5c00dc38558d 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -106,7 +106,7 @@ static int proc_nr_files(const struct ctl_table *table, int write, void *buffer, return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_stat_sysctls[] = { +static const struct ctl_table fs_stat_sysctls[] = { { .procname = "file-nr", .data = &files_stat, @@ -128,7 +128,7 @@ static struct ctl_table fs_stat_sysctls[] = { .data = &sysctl_nr_open, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_douintvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, @@ -194,6 +194,11 @@ static int init_file(struct file *f, int flags, const struct cred *cred) * refcount bumps we should reinitialize the reused file first. */ file_ref_init(&f->f_ref, 1); + /* + * Disable permission and pre-content events for all files by default. + * They may be enabled later by file_set_fsnotify_mode_from_watchers(). + */ + file_set_fsnotify_mode(f, FMODE_NONOTIFY_PERM); return 0; } @@ -351,9 +356,7 @@ static struct file *alloc_file(const struct path *path, int flags, static inline int alloc_path_pseudo(const char *name, struct inode *inode, struct vfsmount *mnt, struct path *path) { - struct qstr this = QSTR_INIT(name, strlen(name)); - - path->dentry = d_alloc_pseudo(mnt->mnt_sb, &this); + path->dentry = d_alloc_pseudo(mnt->mnt_sb, &QSTR(name)); if (!path->dentry) return -ENOMEM; path->mnt = mntget(mnt); @@ -377,7 +380,13 @@ struct file *alloc_file_pseudo(struct inode *inode, struct vfsmount *mnt, if (IS_ERR(file)) { ihold(inode); path_put(&path); + return file; } + /* + * Disable all fsnotify events for pseudo files by default. + * They may be enabled by caller with file_set_fsnotify_mode(). + */ + file_set_fsnotify_mode(file, FMODE_NONOTIFY); return file; } EXPORT_SYMBOL(alloc_file_pseudo); @@ -402,6 +411,11 @@ struct file *alloc_file_pseudo_noaccount(struct inode *inode, return file; } file_init_path(file, &path, fops); + /* + * Disable all fsnotify events for pseudo files by default. + * They may be enabled by caller with file_set_fsnotify_mode(). + */ + file_set_fsnotify_mode(file, FMODE_NONOTIFY); return file; } EXPORT_SYMBOL_GPL(alloc_file_pseudo_noaccount); @@ -478,6 +492,8 @@ static void ____fput(struct callback_head *work) __fput(container_of(work, struct file, f_task_work)); } +static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); + /* * If kernel thread really needs to have the final fput() it has done * to complete, call this. 
The only user right now is the boot - we @@ -491,11 +507,10 @@ static void ____fput(struct callback_head *work) void flush_delayed_fput(void) { delayed_fput(NULL); + flush_delayed_work(&delayed_fput_work); } EXPORT_SYMBOL_GPL(flush_delayed_fput); -static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput); - void fput(struct file *file) { if (file_ref_put(&file->f_ref)) { diff --git a/fs/fs_context.c b/fs/fs_context.c index 98589aae5208..582d33e81117 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -493,7 +493,7 @@ static void put_fc_log(struct fs_context *fc) if (log) { if (refcount_dec_and_test(&log->usage)) { fc->log.log = NULL; - for (i = 0; i <= 7; i++) + for (i = 0; i < ARRAY_SIZE(log->buffer) ; i++) if (log->need_free & (1 << i)) kfree(log->buffer[i]); kfree(log); diff --git a/fs/fs_parser.c b/fs/fs_parser.c index 16fa61ef56bf..e635a81e17d9 100644 --- a/fs/fs_parser.c +++ b/fs/fs_parser.c @@ -13,7 +13,7 @@ #include <linux/namei.h> #include "internal.h" -static const struct constant_table bool_names[] = { +const struct constant_table bool_names[] = { { "0", false }, { "1", true }, { "false", false }, @@ -22,6 +22,7 @@ static const struct constant_table bool_names[] = { { "yes", true }, { }, }; +EXPORT_SYMBOL(bool_names); static const struct constant_table * __lookup_constant(const struct constant_table *tbl, const char *name) diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 8674dbfbe59d..ca215a3cba3e 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -63,3 +63,15 @@ config FUSE_PASSTHROUGH to be performed directly on a backing file. If you want to allow passthrough operations, answer Y. + +config FUSE_IO_URING + bool "FUSE communication over io-uring" + default y + depends on FUSE_FS + depends on IO_URING + help + This allows sending FUSE requests over the io-uring interface and + also adds request core affinity. + + If you want to allow fuse server/client communication through io-uring, + answer Y diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 2c372180d631..3f0f312a31c1 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -15,5 +15,6 @@ fuse-y += iomode.o fuse-$(CONFIG_FUSE_DAX) += dax.o fuse-$(CONFIG_FUSE_PASSTHROUGH) += passthrough.o fuse-$(CONFIG_SYSCTL) += sysctl.o +fuse-$(CONFIG_FUSE_IO_URING) += dev_uring.o virtiofs-y := virtio_fs.o diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 9abbc2f2894f..0b6ee6dd1fd6 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -240,11 +240,12 @@ static int fuse_send_removemapping(struct inode *inode, args.opcode = FUSE_REMOVEMAPPING; args.nodeid = fi->nodeid; - args.in_numargs = 2; - args.in_args[0].size = sizeof(*inargp); - args.in_args[0].value = inargp; - args.in_args[1].size = inargp->count * sizeof(*remove_one); - args.in_args[1].value = remove_one; + args.in_numargs = 3; + fuse_set_zero_arg0(&args); + args.in_args[1].size = sizeof(*inargp); + args.in_args[1].value = inargp; + args.in_args[2].size = inargp->count * sizeof(*remove_one); + args.in_args[2].value = remove_one; return fuse_simple_request(fm, &args); } diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 27ccae63495d..5b5f789b37eb 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -6,7 +6,9 @@ See the file COPYING. 
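For reference, the new option above is only offered when its dependencies are satisfied; a plausible configuration fragment enabling FUSE over io_uring (illustrative, matching the Kconfig entry's depends lines) would look like:

CONFIG_FUSE_FS=y
CONFIG_IO_URING=y
CONFIG_FUSE_IO_URING=y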
*/ +#include "dev_uring_i.h" #include "fuse_i.h" +#include "fuse_dev_i.h" #include <linux/init.h> #include <linux/module.h> @@ -28,23 +30,8 @@ MODULE_ALIAS_MISCDEV(FUSE_MINOR); MODULE_ALIAS("devname:fuse"); -/* Ordinary requests have even IDs, while interrupts IDs are odd */ -#define FUSE_INT_REQ_BIT (1ULL << 0) -#define FUSE_REQ_ID_STEP (1ULL << 1) - static struct kmem_cache *fuse_req_cachep; -static void end_requests(struct list_head *head); - -static struct fuse_dev *fuse_get_dev(struct file *file) -{ - /* - * Lockless access is OK, because file->private data is set - * once during mount and is valid until the file is released. - */ - return READ_ONCE(file->private_data); -} - static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) { INIT_LIST_HEAD(&req->list); @@ -89,7 +76,8 @@ void fuse_set_initialized(struct fuse_conn *fc) static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { - return !fc->initialized || (for_background && fc->blocked); + return !fc->initialized || (for_background && fc->blocked) || + (fc->io_uring && !fuse_uring_ready(fc)); } static void fuse_drop_waiting(struct fuse_conn *fc) @@ -234,7 +222,7 @@ u64 fuse_get_unique(struct fuse_iqueue *fiq) } EXPORT_SYMBOL_GPL(fuse_get_unique); -static unsigned int fuse_req_hash(u64 unique) +unsigned int fuse_req_hash(u64 unique) { return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS); } @@ -250,7 +238,8 @@ __releases(fiq->lock) spin_unlock(&fiq->lock); } -static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_link *forget) +void fuse_dev_queue_forget(struct fuse_iqueue *fiq, + struct fuse_forget_link *forget) { spin_lock(&fiq->lock); if (fiq->connected) { @@ -263,7 +252,7 @@ static void fuse_dev_queue_forget(struct fuse_iqueue *fiq, struct fuse_forget_li } } -static void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) +void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { spin_lock(&fiq->lock); if (list_empty(&req->intr_entry)) { @@ -580,7 +569,25 @@ ssize_t __fuse_simple_request(struct mnt_idmap *idmap, return ret; } -static bool fuse_request_queue_background(struct fuse_req *req) +#ifdef CONFIG_FUSE_IO_URING +static bool fuse_request_queue_background_uring(struct fuse_conn *fc, + struct fuse_req *req) +{ + struct fuse_iqueue *fiq = &fc->iq; + + req->in.h.unique = fuse_get_unique(fiq); + req->in.h.len = sizeof(struct fuse_in_header) + + fuse_len_args(req->args->in_numargs, + (struct fuse_arg *) req->args->in_args); + + return fuse_uring_queue_bq_req(req); +} +#endif + +/* + * @return true if queued + */ +static int fuse_request_queue_background(struct fuse_req *req) { struct fuse_mount *fm = req->fm; struct fuse_conn *fc = fm->fc; @@ -592,6 +599,12 @@ static bool fuse_request_queue_background(struct fuse_req *req) atomic_inc(&fc->num_waiting); } __set_bit(FR_ISREPLY, &req->flags); + +#ifdef CONFIG_FUSE_IO_URING + if (fuse_uring_ready(fc)) + return fuse_request_queue_background_uring(fc, req); +#endif + spin_lock(&fc->bg_lock); if (likely(fc->connected)) { fc->num_background++; @@ -692,22 +705,8 @@ static int unlock_request(struct fuse_req *req) return err; } -struct fuse_copy_state { - int write; - struct fuse_req *req; - struct iov_iter *iter; - struct pipe_buffer *pipebufs; - struct pipe_buffer *currbuf; - struct pipe_inode_info *pipe; - unsigned long nr_segs; - struct page *pg; - unsigned len; - unsigned offset; - unsigned move_pages:1; -}; - -static void fuse_copy_init(struct fuse_copy_state *cs, int write, 
- struct iov_iter *iter) +void fuse_copy_init(struct fuse_copy_state *cs, int write, + struct iov_iter *iter) { memset(cs, 0, sizeof(*cs)); cs->write = write; @@ -814,6 +813,9 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size) *size -= ncpy; cs->len -= ncpy; cs->offset += ncpy; + if (cs->is_uring) + cs->ring.copied_sz += ncpy; + return ncpy; } @@ -1068,9 +1070,9 @@ static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size) } /* Copy request arguments to/from userspace buffer */ -static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, - unsigned argpages, struct fuse_arg *args, - int zeroing) +int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, + unsigned argpages, struct fuse_arg *args, + int zeroing) { int err = 0; unsigned i; @@ -1760,7 +1762,7 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, args = &ap->args; args->nodeid = outarg->nodeid; args->opcode = FUSE_NOTIFY_REPLY; - args->in_numargs = 2; + args->in_numargs = 3; args->in_pages = true; args->end = fuse_retrieve_end; @@ -1788,9 +1790,10 @@ static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode, } ra->inarg.offset = outarg->offset; ra->inarg.size = total_len; - args->in_args[0].size = sizeof(ra->inarg); - args->in_args[0].value = &ra->inarg; - args->in_args[1].size = total_len; + fuse_set_zero_arg0(args); + args->in_args[1].size = sizeof(ra->inarg); + args->in_args[1].value = &ra->inarg; + args->in_args[2].size = total_len; err = fuse_simple_notify_reply(fm, args, outarg->notify_unique); if (err) @@ -1885,7 +1888,7 @@ static void fuse_resend(struct fuse_conn *fc) spin_unlock(&fiq->lock); list_for_each_entry(req, &to_queue, list) clear_bit(FR_PENDING, &req->flags); - end_requests(&to_queue); + fuse_dev_end_requests(&to_queue); return; } /* iq and pq requests are both oldest to newest */ @@ -1934,7 +1937,7 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, } /* Look up request on processing list by unique ID */ -static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) +struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique) { unsigned int hash = fuse_req_hash(unique); struct fuse_req *req; @@ -1946,10 +1949,17 @@ static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) return NULL; } -static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, - unsigned nbytes) +int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, + unsigned nbytes) { - unsigned reqsize = sizeof(struct fuse_out_header); + + unsigned int reqsize = 0; + + /* + * Uring has all headers separated from args - args is payload only + */ + if (!cs->is_uring) + reqsize = sizeof(struct fuse_out_header); reqsize += fuse_len_args(args->out_numargs, args->out_args); @@ -2011,7 +2021,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, spin_lock(&fpq->lock); req = NULL; if (fpq->connected) - req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); + req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); err = -ENOENT; if (!req) { @@ -2049,7 +2059,7 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud, if (oh.error) err = nbytes != sizeof(oh) ? 
-EINVAL : 0; else - err = copy_out_args(cs, req->args, nbytes); + err = fuse_copy_out_args(cs, req->args, nbytes); fuse_copy_finish(cs); spin_lock(&fpq->lock); @@ -2204,7 +2214,7 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait) } /* Abort all requests on the given list (pending or processing) */ -static void end_requests(struct list_head *head) +void fuse_dev_end_requests(struct list_head *head) { while (!list_empty(head)) { struct fuse_req *req; @@ -2307,7 +2317,13 @@ void fuse_abort_conn(struct fuse_conn *fc) wake_up_all(&fc->blocked_waitq); spin_unlock(&fc->lock); - end_requests(&to_end); + fuse_dev_end_requests(&to_end); + + /* + * fc->lock must not be taken to avoid conflicts with io-uring + * locks + */ + fuse_uring_abort(fc); } else { spin_unlock(&fc->lock); } @@ -2319,6 +2335,8 @@ void fuse_wait_aborted(struct fuse_conn *fc) /* matches implicit memory barrier in fuse_drop_waiting() */ smp_mb(); wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0); + + fuse_uring_wait_stopped_queues(fc); } int fuse_dev_release(struct inode *inode, struct file *file) @@ -2337,7 +2355,7 @@ int fuse_dev_release(struct inode *inode, struct file *file) list_splice_init(&fpq->processing[i], &to_end); spin_unlock(&fpq->lock); - end_requests(&to_end); + fuse_dev_end_requests(&to_end); /* Are we the last open device? */ if (atomic_dec_and_test(&fc->dev_count)) { @@ -2475,6 +2493,9 @@ const struct file_operations fuse_dev_operations = { .fasync = fuse_dev_fasync, .unlocked_ioctl = fuse_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, +#ifdef CONFIG_FUSE_IO_URING + .uring_cmd = fuse_uring_cmd, +#endif }; EXPORT_SYMBOL_GPL(fuse_dev_operations); diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c new file mode 100644 index 000000000000..ebd2931b4f2a --- /dev/null +++ b/fs/fuse/dev_uring.c @@ -0,0 +1,1319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE: Filesystem in Userspace + * Copyright (c) 2023-2024 DataDirect Networks. + */ + +#include "fuse_i.h" +#include "dev_uring_i.h" +#include "fuse_dev_i.h" + +#include <linux/fs.h> +#include <linux/io_uring/cmd.h> + +static bool __read_mostly enable_uring; +module_param(enable_uring, bool, 0644); +MODULE_PARM_DESC(enable_uring, + "Enable userspace communication through io-uring"); + +#define FUSE_URING_IOV_SEGS 2 /* header and payload */ + + +bool fuse_uring_enabled(void) +{ + return enable_uring; +} + +struct fuse_uring_pdu { + struct fuse_ring_ent *ent; +}; + +static const struct fuse_iqueue_ops fuse_io_uring_ops; + +static void uring_cmd_set_ring_ent(struct io_uring_cmd *cmd, + struct fuse_ring_ent *ring_ent) +{ + struct fuse_uring_pdu *pdu = + io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu); + + pdu->ent = ring_ent; +} + +static struct fuse_ring_ent *uring_cmd_to_ring_ent(struct io_uring_cmd *cmd) +{ + struct fuse_uring_pdu *pdu = + io_uring_cmd_to_pdu(cmd, struct fuse_uring_pdu); + + return pdu->ent; +} + +static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + + lockdep_assert_held(&queue->lock); + lockdep_assert_held(&fc->bg_lock); + + /* + * Allow one bg request per queue, ignoring global fc limits. + * This prevents a single queue from consuming all resources and + * eliminates the need for remote queue wake-ups when global + * limits are met but this queue has no more waiting requests. 
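A reduced standalone sketch, not the driver code, of the admission rule described in the comment above and applied by fuse_uring_flush_bg(), i.e. promote while fc->active_background < fc->max_background or whenever the queue has nothing in flight yet. Parameter names are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool can_promote(unsigned int global_active, unsigned int global_max,
			unsigned int queue_active)
{
	/* under the global limit, or guarantee one in-flight bg request per queue */
	return global_active < global_max || queue_active == 0;
}

int main(void)
{
	printf("%d\n", can_promote(12, 12, 0)); /* 1: idle queue bypasses the limit */
	printf("%d\n", can_promote(12, 12, 1)); /* 0: global limit reached */
	printf("%d\n", can_promote(3, 12, 5));  /* 1: still under the global limit */
	return 0;
}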
+ */ + while ((fc->active_background < fc->max_background || + !queue->active_background) && + (!list_empty(&queue->fuse_req_bg_queue))) { + struct fuse_req *req; + + req = list_first_entry(&queue->fuse_req_bg_queue, + struct fuse_req, list); + fc->active_background++; + queue->active_background++; + + list_move_tail(&req->list, &queue->fuse_req_queue); + } +} + +static void fuse_uring_req_end(struct fuse_ring_ent *ent, struct fuse_req *req, + int error) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + + lockdep_assert_not_held(&queue->lock); + spin_lock(&queue->lock); + ent->fuse_req = NULL; + if (test_bit(FR_BACKGROUND, &req->flags)) { + queue->active_background--; + spin_lock(&fc->bg_lock); + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + } + + spin_unlock(&queue->lock); + + if (error) + req->out.h.error = error; + + clear_bit(FR_SENT, &req->flags); + fuse_request_end(req); +} + +/* Abort all list queued request on the given ring queue */ +static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue) +{ + struct fuse_req *req; + LIST_HEAD(req_list); + + spin_lock(&queue->lock); + list_for_each_entry(req, &queue->fuse_req_queue, list) + clear_bit(FR_PENDING, &req->flags); + list_splice_init(&queue->fuse_req_queue, &req_list); + spin_unlock(&queue->lock); + + /* must not hold queue lock to avoid order issues with fi->lock */ + fuse_dev_end_requests(&req_list); +} + +void fuse_uring_abort_end_requests(struct fuse_ring *ring) +{ + int qid; + struct fuse_ring_queue *queue; + struct fuse_conn *fc = ring->fc; + + for (qid = 0; qid < ring->nr_queues; qid++) { + queue = READ_ONCE(ring->queues[qid]); + if (!queue) + continue; + + queue->stopped = true; + + WARN_ON_ONCE(ring->fc->max_background != UINT_MAX); + spin_lock(&queue->lock); + spin_lock(&fc->bg_lock); + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + spin_unlock(&queue->lock); + fuse_uring_abort_end_queue_requests(queue); + } +} + +void fuse_uring_destruct(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + int qid; + + if (!ring) + return; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = ring->queues[qid]; + struct fuse_ring_ent *ent, *next; + + if (!queue) + continue; + + WARN_ON(!list_empty(&queue->ent_avail_queue)); + WARN_ON(!list_empty(&queue->ent_w_req_queue)); + WARN_ON(!list_empty(&queue->ent_commit_queue)); + WARN_ON(!list_empty(&queue->ent_in_userspace)); + + list_for_each_entry_safe(ent, next, &queue->ent_released, + list) { + list_del_init(&ent->list); + kfree(ent); + } + + kfree(queue->fpq.processing); + kfree(queue); + ring->queues[qid] = NULL; + } + + kfree(ring->queues); + kfree(ring); + fc->ring = NULL; +} + +/* + * Basic ring setup for this connection based on the provided configuration + */ +static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc) +{ + struct fuse_ring *ring; + size_t nr_queues = num_possible_cpus(); + struct fuse_ring *res = NULL; + size_t max_payload_size; + + ring = kzalloc(sizeof(*fc->ring), GFP_KERNEL_ACCOUNT); + if (!ring) + return NULL; + + ring->queues = kcalloc(nr_queues, sizeof(struct fuse_ring_queue *), + GFP_KERNEL_ACCOUNT); + if (!ring->queues) + goto out_err; + + max_payload_size = max(FUSE_MIN_READ_BUFFER, fc->max_write); + max_payload_size = max(max_payload_size, fc->max_pages * PAGE_SIZE); + + spin_lock(&fc->lock); + if (fc->ring) { + /* race, another thread created the ring in the meantime */ + 
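fuse_uring_create() resolves the creation race flagged in the comment above with an allocate-outside-the-lock, publish-under-the-lock pattern: the loser of the race frees its copy and reuses the published one. A rough pthreads sketch of the same idea; get_ring and shared_ring are illustrative names only.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_ring;

static void *get_ring(void)
{
	void *ring = malloc(64);	/* heavier setup done without the lock */

	if (!ring)
		return NULL;

	pthread_mutex_lock(&lock);
	if (shared_ring) {		/* race: another thread published first */
		pthread_mutex_unlock(&lock);
		free(ring);		/* discard ours, reuse the winner's */
		return shared_ring;
	}
	shared_ring = ring;		/* we won: publish under the lock */
	pthread_mutex_unlock(&lock);
	return ring;
}

int main(void)
{
	printf("%p\n", get_ring());
	printf("%p\n", get_ring());	/* same pointer: second call reuses it */
	return 0;
}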
spin_unlock(&fc->lock); + res = fc->ring; + goto out_err; + } + + init_waitqueue_head(&ring->stop_waitq); + + fc->ring = ring; + ring->nr_queues = nr_queues; + ring->fc = fc; + ring->max_payload_sz = max_payload_size; + atomic_set(&ring->queue_refs, 0); + + spin_unlock(&fc->lock); + return ring; + +out_err: + kfree(ring->queues); + kfree(ring); + return res; +} + +static struct fuse_ring_queue *fuse_uring_create_queue(struct fuse_ring *ring, + int qid) +{ + struct fuse_conn *fc = ring->fc; + struct fuse_ring_queue *queue; + struct list_head *pq; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT); + if (!queue) + return NULL; + pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL); + if (!pq) { + kfree(queue); + return NULL; + } + + queue->qid = qid; + queue->ring = ring; + spin_lock_init(&queue->lock); + + INIT_LIST_HEAD(&queue->ent_avail_queue); + INIT_LIST_HEAD(&queue->ent_commit_queue); + INIT_LIST_HEAD(&queue->ent_w_req_queue); + INIT_LIST_HEAD(&queue->ent_in_userspace); + INIT_LIST_HEAD(&queue->fuse_req_queue); + INIT_LIST_HEAD(&queue->fuse_req_bg_queue); + INIT_LIST_HEAD(&queue->ent_released); + + queue->fpq.processing = pq; + fuse_pqueue_init(&queue->fpq); + + spin_lock(&fc->lock); + if (ring->queues[qid]) { + spin_unlock(&fc->lock); + kfree(queue->fpq.processing); + kfree(queue); + return ring->queues[qid]; + } + + /* + * write_once and lock as the caller mostly doesn't take the lock at all + */ + WRITE_ONCE(ring->queues[qid], queue); + spin_unlock(&fc->lock); + + return queue; +} + +static void fuse_uring_stop_fuse_req_end(struct fuse_req *req) +{ + clear_bit(FR_SENT, &req->flags); + req->out.h.error = -ECONNABORTED; + fuse_request_end(req); +} + +/* + * Release a request/entry on connection tear down + */ +static void fuse_uring_entry_teardown(struct fuse_ring_ent *ent) +{ + struct fuse_req *req; + struct io_uring_cmd *cmd; + + struct fuse_ring_queue *queue = ent->queue; + + spin_lock(&queue->lock); + cmd = ent->cmd; + ent->cmd = NULL; + req = ent->fuse_req; + ent->fuse_req = NULL; + if (req) { + /* remove entry from queue->fpq->processing */ + list_del_init(&req->list); + } + + /* + * The entry must not be freed immediately, due to access of direct + * pointer access of entries through IO_URING_F_CANCEL - there is a risk + * of race between daemon termination (which triggers IO_URING_F_CANCEL + * and accesses entries without checking the list state first + */ + list_move(&ent->list, &queue->ent_released); + ent->state = FRRS_RELEASED; + spin_unlock(&queue->lock); + + if (cmd) + io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED); + + if (req) + fuse_uring_stop_fuse_req_end(req); +} + +static void fuse_uring_stop_list_entries(struct list_head *head, + struct fuse_ring_queue *queue, + enum fuse_ring_req_state exp_state) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_ring_ent *ent, *next; + ssize_t queue_refs = SSIZE_MAX; + LIST_HEAD(to_teardown); + + spin_lock(&queue->lock); + list_for_each_entry_safe(ent, next, head, list) { + if (ent->state != exp_state) { + pr_warn("entry teardown qid=%d state=%d expected=%d", + queue->qid, ent->state, exp_state); + continue; + } + + ent->state = FRRS_TEARDOWN; + list_move(&ent->list, &to_teardown); + } + spin_unlock(&queue->lock); + + /* no queue lock to avoid lock order issues */ + list_for_each_entry_safe(ent, next, &to_teardown, list) { + fuse_uring_entry_teardown(ent); + queue_refs = atomic_dec_return(&ring->queue_refs); + WARN_ON_ONCE(queue_refs < 0); + } +} + +static void 
fuse_uring_teardown_entries(struct fuse_ring_queue *queue) +{ + fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, + FRRS_USERSPACE); + fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, + FRRS_AVAILABLE); +} + +/* + * Log state debug info + */ +static void fuse_uring_log_ent_state(struct fuse_ring *ring) +{ + int qid; + struct fuse_ring_ent *ent; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = ring->queues[qid]; + + if (!queue) + continue; + + spin_lock(&queue->lock); + /* + * Log entries from the intermediate queue, the other queues + * should be empty + */ + list_for_each_entry(ent, &queue->ent_w_req_queue, list) { + pr_info(" ent-req-queue ring=%p qid=%d ent=%p state=%d\n", + ring, qid, ent, ent->state); + } + list_for_each_entry(ent, &queue->ent_commit_queue, list) { + pr_info(" ent-commit-queue ring=%p qid=%d ent=%p state=%d\n", + ring, qid, ent, ent->state); + } + spin_unlock(&queue->lock); + } + ring->stop_debug_log = 1; +} + +static void fuse_uring_async_stop_queues(struct work_struct *work) +{ + int qid; + struct fuse_ring *ring = + container_of(work, struct fuse_ring, async_teardown_work.work); + + /* XXX code dup */ + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); + + if (!queue) + continue; + + fuse_uring_teardown_entries(queue); + } + + /* + * Some ring entries might be in the middle of IO operations, + * i.e. in process to get handled by file_operations::uring_cmd + * or on the way to userspace - we could handle that with conditions in + * run time code, but easier/cleaner to have an async tear down handler + * If there are still queue references left + */ + if (atomic_read(&ring->queue_refs) > 0) { + if (time_after(jiffies, + ring->teardown_time + FUSE_URING_TEARDOWN_TIMEOUT)) + fuse_uring_log_ent_state(ring); + + schedule_delayed_work(&ring->async_teardown_work, + FUSE_URING_TEARDOWN_INTERVAL); + } else { + wake_up_all(&ring->stop_waitq); + } +} + +/* + * Stop the ring queues + */ +void fuse_uring_stop_queues(struct fuse_ring *ring) +{ + int qid; + + for (qid = 0; qid < ring->nr_queues; qid++) { + struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); + + if (!queue) + continue; + + fuse_uring_teardown_entries(queue); + } + + if (atomic_read(&ring->queue_refs) > 0) { + ring->teardown_time = jiffies; + INIT_DELAYED_WORK(&ring->async_teardown_work, + fuse_uring_async_stop_queues); + schedule_delayed_work(&ring->async_teardown_work, + FUSE_URING_TEARDOWN_INTERVAL); + } else { + wake_up_all(&ring->stop_waitq); + } +} + +/* + * Handle IO_URING_F_CANCEL, typically should come on daemon termination. 
+ * + * Releasing the last entry should trigger fuse_dev_release() if + * the daemon was terminated + */ +static void fuse_uring_cancel(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); + struct fuse_ring_queue *queue; + bool need_cmd_done = false; + + /* + * direct access on ent - it must not be destructed as long as + * IO_URING_F_CANCEL might come up + */ + queue = ent->queue; + spin_lock(&queue->lock); + if (ent->state == FRRS_AVAILABLE) { + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + need_cmd_done = true; + ent->cmd = NULL; + } + spin_unlock(&queue->lock); + + if (need_cmd_done) { + /* no queue lock to avoid lock order issues */ + io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags); + } +} + +static void fuse_uring_prepare_cancel(struct io_uring_cmd *cmd, int issue_flags, + struct fuse_ring_ent *ring_ent) +{ + uring_cmd_set_ring_ent(cmd, ring_ent); + io_uring_cmd_mark_cancelable(cmd, issue_flags); +} + +/* + * Checks for errors and stores it into the request + */ +static int fuse_uring_out_header_has_err(struct fuse_out_header *oh, + struct fuse_req *req, + struct fuse_conn *fc) +{ + int err; + + err = -EINVAL; + if (oh->unique == 0) { + /* Not supported through io-uring yet */ + pr_warn_once("notify through fuse-io-uring not supported\n"); + goto err; + } + + if (oh->error <= -ERESTARTSYS || oh->error > 0) + goto err; + + if (oh->error) { + err = oh->error; + goto err; + } + + err = -ENOENT; + if ((oh->unique & ~FUSE_INT_REQ_BIT) != req->in.h.unique) { + pr_warn_ratelimited("unique mismatch, expected: %llu got %llu\n", + req->in.h.unique, + oh->unique & ~FUSE_INT_REQ_BIT); + goto err; + } + + /* + * Is it an interrupt reply ID? + * XXX: Not supported through fuse-io-uring yet, it should not even + * find the request - should not happen. + */ + WARN_ON_ONCE(oh->unique & FUSE_INT_REQ_BIT); + + err = 0; +err: + return err; +} + +static int fuse_uring_copy_from_ring(struct fuse_ring *ring, + struct fuse_req *req, + struct fuse_ring_ent *ent) +{ + struct fuse_copy_state cs; + struct fuse_args *args = req->args; + struct iov_iter iter; + int err; + struct fuse_uring_ent_in_out ring_in_out; + + err = copy_from_user(&ring_in_out, &ent->headers->ring_ent_in_out, + sizeof(ring_in_out)); + if (err) + return -EFAULT; + + err = import_ubuf(ITER_SOURCE, ent->payload, ring->max_payload_sz, + &iter); + if (err) + return err; + + fuse_copy_init(&cs, 0, &iter); + cs.is_uring = 1; + cs.req = req; + + return fuse_copy_out_args(&cs, args, ring_in_out.payload_sz); +} + + /* + * Copy data from the req to the ring buffer + */ +static int fuse_uring_args_to_ring(struct fuse_ring *ring, struct fuse_req *req, + struct fuse_ring_ent *ent) +{ + struct fuse_copy_state cs; + struct fuse_args *args = req->args; + struct fuse_in_arg *in_args = args->in_args; + int num_args = args->in_numargs; + int err; + struct iov_iter iter; + struct fuse_uring_ent_in_out ent_in_out = { + .flags = 0, + .commit_id = req->in.h.unique, + }; + + err = import_ubuf(ITER_DEST, ent->payload, ring->max_payload_sz, &iter); + if (err) { + pr_info_ratelimited("fuse: Import of user buffer failed\n"); + return err; + } + + fuse_copy_init(&cs, 1, &iter); + cs.is_uring = 1; + cs.req = req; + + if (num_args > 0) { + /* + * Expectation is that the first argument is the per op header. + * Some op code have that as zero size. 
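From the server's point of view, the copy above means a completed SQE leaves the request fully laid out in the two registered buffers: the fuse_in_header in the in_out field, the opcode-specific header in op_in, any further input data in the payload buffer, and commit_id/payload_sz in ring_ent_in_out. A hedged sketch of the decode step (struct and field names follow this patch's uapi additions; everything else, including the helper name, is illustrative):

#include <linux/fuse.h>   /* needs a uapi header that carries the fuse uring structs */
#include <stdint.h>
#include <string.h>

/* Sketch: decode one request from the registered header/payload buffers
 * after a REGISTER or COMMIT_AND_FETCH completion. */
static void handle_one_request(struct fuse_uring_req_header *hdr,
                               const void *payload)
{
        struct fuse_in_header in;
        uint64_t commit_id = hdr->ring_ent_in_out.commit_id;
        uint32_t payload_sz = hdr->ring_ent_in_out.payload_sz;

        /* the kernel copied req->in.h into hdr->in_out */
        memcpy(&in, &hdr->in_out, sizeof(in));

        switch (in.opcode) {
        case FUSE_WRITE: {
                /* opcode-specific header sits in hdr->op_in ... */
                struct fuse_write_in write_in;

                memcpy(&write_in, &hdr->op_in, sizeof(write_in));
                /* ... and the write data is in the payload buffer */
                break;
        }
        default:
                break;
        }

        /* commit_id must be sent back with FUSE_IO_URING_CMD_COMMIT_AND_FETCH */
        (void)commit_id;
        (void)payload_sz;
        (void)payload;
}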
+ */ + if (args->in_args[0].size > 0) { + err = copy_to_user(&ent->headers->op_in, in_args->value, + in_args->size); + if (err) { + pr_info_ratelimited( + "Copying the header failed.\n"); + return -EFAULT; + } + } + in_args++; + num_args--; + } + + /* copy the payload */ + err = fuse_copy_args(&cs, num_args, args->in_pages, + (struct fuse_arg *)in_args, 0); + if (err) { + pr_info_ratelimited("%s fuse_copy_args failed\n", __func__); + return err; + } + + ent_in_out.payload_sz = cs.ring.copied_sz; + err = copy_to_user(&ent->headers->ring_ent_in_out, &ent_in_out, + sizeof(ent_in_out)); + return err ? -EFAULT : 0; +} + +static int fuse_uring_copy_to_ring(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + int err; + + err = -EIO; + if (WARN_ON(ent->state != FRRS_FUSE_REQ)) { + pr_err("qid=%d ring-req=%p invalid state %d on send\n", + queue->qid, ent, ent->state); + return err; + } + + err = -EINVAL; + if (WARN_ON(req->in.h.unique == 0)) + return err; + + /* copy the request */ + err = fuse_uring_args_to_ring(ring, req, ent); + if (unlikely(err)) { + pr_info_ratelimited("Copy to ring failed: %d\n", err); + return err; + } + + /* copy fuse_in_header */ + err = copy_to_user(&ent->headers->in_out, &req->in.h, + sizeof(req->in.h)); + if (err) { + err = -EFAULT; + return err; + } + + return 0; +} + +static int fuse_uring_prepare_send(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + int err; + + err = fuse_uring_copy_to_ring(ent, req); + if (!err) + set_bit(FR_SENT, &req->flags); + else + fuse_uring_req_end(ent, req, err); + + return err; +} + +/* + * Write data to the ring buffer and send the request to userspace, + * userspace will read it + * This is comparable with classical read(/dev/fuse) + */ +static int fuse_uring_send_next_to_ring(struct fuse_ring_ent *ent, + struct fuse_req *req, + unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + int err; + struct io_uring_cmd *cmd; + + err = fuse_uring_prepare_send(ent, req); + if (err) + return err; + + spin_lock(&queue->lock); + cmd = ent->cmd; + ent->cmd = NULL; + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + spin_unlock(&queue->lock); + + io_uring_cmd_done(cmd, 0, 0, issue_flags); + return 0; +} + +/* + * Make a ring entry available for fuse_req assignment + */ +static void fuse_uring_ent_avail(struct fuse_ring_ent *ent, + struct fuse_ring_queue *queue) +{ + WARN_ON_ONCE(!ent->cmd); + list_move(&ent->list, &queue->ent_avail_queue); + ent->state = FRRS_AVAILABLE; +} + +/* Used to find the request on SQE commit */ +static void fuse_uring_add_to_pq(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_pqueue *fpq = &queue->fpq; + unsigned int hash; + + req->ring_entry = ent; + hash = fuse_req_hash(req->in.h.unique); + list_move_tail(&req->list, &fpq->processing[hash]); +} + +/* + * Assign a fuse queue entry to the given entry + */ +static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent, + struct fuse_req *req) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_conn *fc = req->fm->fc; + struct fuse_iqueue *fiq = &fc->iq; + + lockdep_assert_held(&queue->lock); + + if (WARN_ON_ONCE(ent->state != FRRS_AVAILABLE && + ent->state != FRRS_COMMIT)) { + pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, + ent->state); + } + + spin_lock(&fiq->lock); + clear_bit(FR_PENDING, &req->flags); + spin_unlock(&fiq->lock); + 
ent->fuse_req = req; + ent->state = FRRS_FUSE_REQ; + list_move(&ent->list, &queue->ent_w_req_queue); + fuse_uring_add_to_pq(ent, req); +} + +/* Fetch the next fuse request if available */ +static struct fuse_req *fuse_uring_ent_assign_req(struct fuse_ring_ent *ent) + __must_hold(&queue->lock) +{ + struct fuse_req *req; + struct fuse_ring_queue *queue = ent->queue; + struct list_head *req_queue = &queue->fuse_req_queue; + + lockdep_assert_held(&queue->lock); + + /* get and assign the next entry while it is still holding the lock */ + req = list_first_entry_or_null(req_queue, struct fuse_req, list); + if (req) + fuse_uring_add_req_to_ring_ent(ent, req); + + return req; +} + +/* + * Read data from the ring buffer, which user space has written to + * This is comparible with handling of classical write(/dev/fuse). + * Also make the ring request available again for new fuse requests. + */ +static void fuse_uring_commit(struct fuse_ring_ent *ent, struct fuse_req *req, + unsigned int issue_flags) +{ + struct fuse_ring *ring = ent->queue->ring; + struct fuse_conn *fc = ring->fc; + ssize_t err = 0; + + err = copy_from_user(&req->out.h, &ent->headers->in_out, + sizeof(req->out.h)); + if (err) { + req->out.h.error = -EFAULT; + goto out; + } + + err = fuse_uring_out_header_has_err(&req->out.h, req, fc); + if (err) { + /* req->out.h.error already set */ + goto out; + } + + err = fuse_uring_copy_from_ring(ring, req, ent); +out: + fuse_uring_req_end(ent, req, err); +} + +/* + * Get the next fuse req and send it + */ +static void fuse_uring_next_fuse_req(struct fuse_ring_ent *ent, + struct fuse_ring_queue *queue, + unsigned int issue_flags) +{ + int err; + struct fuse_req *req; + +retry: + spin_lock(&queue->lock); + fuse_uring_ent_avail(ent, queue); + req = fuse_uring_ent_assign_req(ent); + spin_unlock(&queue->lock); + + if (req) { + err = fuse_uring_send_next_to_ring(ent, req, issue_flags); + if (err) + goto retry; + } +} + +static int fuse_ring_ent_set_commit(struct fuse_ring_ent *ent) +{ + struct fuse_ring_queue *queue = ent->queue; + + lockdep_assert_held(&queue->lock); + + if (WARN_ON_ONCE(ent->state != FRRS_USERSPACE)) + return -EIO; + + ent->state = FRRS_COMMIT; + list_move(&ent->list, &queue->ent_commit_queue); + + return 0; +} + +/* FUSE_URING_CMD_COMMIT_AND_FETCH handler */ +static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags, + struct fuse_conn *fc) +{ + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + struct fuse_ring_ent *ent; + int err; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + uint64_t commit_id = READ_ONCE(cmd_req->commit_id); + unsigned int qid = READ_ONCE(cmd_req->qid); + struct fuse_pqueue *fpq; + struct fuse_req *req; + + err = -ENOTCONN; + if (!ring) + return err; + + if (qid >= ring->nr_queues) + return -EINVAL; + + queue = ring->queues[qid]; + if (!queue) + return err; + fpq = &queue->fpq; + + if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) + return err; + + spin_lock(&queue->lock); + /* Find a request based on the unique ID of the fuse request + * This should get revised, as it needs a hash calculation and list + * search. And full struct fuse_pqueue is needed (memory overhead). + * As well as the link from req to ring_ent. 
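The lookup by commit_id above is fed from userspace: to answer a request the server writes a fuse_out_header whose unique matches the request into the in_out field, places reply data in the payload buffer, records its length in ring_ent_in_out.payload_sz, and submits a FUSE_IO_URING_CMD_COMMIT_AND_FETCH SQE whose fuse_uring_cmd_req repeats that unique as commit_id together with the queue's qid. A hedged sketch (liburing-based, error handling omitted; only the fuse structure and command names are taken from this patch):

#include <liburing.h>
#include <linux/fuse.h>
#include <stdint.h>
#include <string.h>

/* Sketch: commit a reply and re-arm the ring entry with a single SQE. */
static int commit_and_fetch(struct io_uring *ring, int fuse_fd,
                            unsigned int qid,
                            struct fuse_uring_req_header *hdr,
                            const struct fuse_in_header *in,
                            uint32_t reply_payload_sz)
{
        struct fuse_out_header out = {
                .unique = in->unique,                   /* must match the request */
                .error  = 0,
                .len    = sizeof(out) + reply_payload_sz, /* classic /dev/fuse convention */
        };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct fuse_uring_cmd_req *req;

        memcpy(&hdr->in_out, &out, sizeof(out));
        hdr->ring_ent_in_out.payload_sz = reply_payload_sz;
        /* the reply data itself goes into the payload buffer registered earlier */

        io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fuse_fd, NULL, 0, 0);
        sqe->cmd_op = FUSE_IO_URING_CMD_COMMIT_AND_FETCH;

        req = (struct fuse_uring_cmd_req *)sqe->cmd;
        memset(req, 0, sizeof(*req));
        req->qid = qid;
        req->commit_id = in->unique;    /* the kernel finds the request by this */

        return io_uring_submit(ring);
}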
+ */ + req = fuse_request_find(fpq, commit_id); + err = -ENOENT; + if (!req) { + pr_info("qid=%d commit_id %llu not found\n", queue->qid, + commit_id); + spin_unlock(&queue->lock); + return err; + } + list_del_init(&req->list); + ent = req->ring_entry; + req->ring_entry = NULL; + + err = fuse_ring_ent_set_commit(ent); + if (err != 0) { + pr_info_ratelimited("qid=%d commit_id %llu state %d", + queue->qid, commit_id, ent->state); + spin_unlock(&queue->lock); + req->out.h.error = err; + clear_bit(FR_SENT, &req->flags); + fuse_request_end(req); + return err; + } + + ent->cmd = cmd; + spin_unlock(&queue->lock); + + /* without the queue lock, as other locks are taken */ + fuse_uring_prepare_cancel(cmd, issue_flags, ent); + fuse_uring_commit(ent, req, issue_flags); + + /* + * Fetching the next request is absolutely required as queued + * fuse requests would otherwise not get processed - committing + * and fetching is done in one step vs legacy fuse, which has separated + * read (fetch request) and write (commit result). + */ + fuse_uring_next_fuse_req(ent, queue, issue_flags); + return 0; +} + +static bool is_ring_ready(struct fuse_ring *ring, int current_qid) +{ + int qid; + struct fuse_ring_queue *queue; + bool ready = true; + + for (qid = 0; qid < ring->nr_queues && ready; qid++) { + if (current_qid == qid) + continue; + + queue = ring->queues[qid]; + if (!queue) { + ready = false; + break; + } + + spin_lock(&queue->lock); + if (list_empty(&queue->ent_avail_queue)) + ready = false; + spin_unlock(&queue->lock); + } + + return ready; +} + +/* + * fuse_uring_req_fetch command handling + */ +static void fuse_uring_do_register(struct fuse_ring_ent *ent, + struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + struct fuse_ring *ring = queue->ring; + struct fuse_conn *fc = ring->fc; + struct fuse_iqueue *fiq = &fc->iq; + + fuse_uring_prepare_cancel(cmd, issue_flags, ent); + + spin_lock(&queue->lock); + ent->cmd = cmd; + fuse_uring_ent_avail(ent, queue); + spin_unlock(&queue->lock); + + if (!ring->ready) { + bool ready = is_ring_ready(ring, queue->qid); + + if (ready) { + WRITE_ONCE(fiq->ops, &fuse_io_uring_ops); + WRITE_ONCE(ring->ready, true); + wake_up_all(&fc->blocked_waitq); + } + } +} + +/* + * sqe->addr is a ptr to an iovec array, iov[0] has the headers, iov[1] + * the payload + */ +static int fuse_uring_get_iovec_from_sqe(const struct io_uring_sqe *sqe, + struct iovec iov[FUSE_URING_IOV_SEGS]) +{ + struct iovec __user *uiov = u64_to_user_ptr(READ_ONCE(sqe->addr)); + struct iov_iter iter; + ssize_t ret; + + if (sqe->len != FUSE_URING_IOV_SEGS) + return -EINVAL; + + /* + * Direction for buffer access will actually be READ and WRITE, + * using write for the import should include READ access as well. 
+ */ + ret = import_iovec(WRITE, uiov, FUSE_URING_IOV_SEGS, + FUSE_URING_IOV_SEGS, &iov, &iter); + if (ret < 0) + return ret; + + return 0; +} + +static struct fuse_ring_ent * +fuse_uring_create_ring_ent(struct io_uring_cmd *cmd, + struct fuse_ring_queue *queue) +{ + struct fuse_ring *ring = queue->ring; + struct fuse_ring_ent *ent; + size_t payload_size; + struct iovec iov[FUSE_URING_IOV_SEGS]; + int err; + + err = fuse_uring_get_iovec_from_sqe(cmd->sqe, iov); + if (err) { + pr_info_ratelimited("Failed to get iovec from sqe, err=%d\n", + err); + return ERR_PTR(err); + } + + err = -EINVAL; + if (iov[0].iov_len < sizeof(struct fuse_uring_req_header)) { + pr_info_ratelimited("Invalid header len %zu\n", iov[0].iov_len); + return ERR_PTR(err); + } + + payload_size = iov[1].iov_len; + if (payload_size < ring->max_payload_sz) { + pr_info_ratelimited("Invalid req payload len %zu\n", + payload_size); + return ERR_PTR(err); + } + + err = -ENOMEM; + ent = kzalloc(sizeof(*ent), GFP_KERNEL_ACCOUNT); + if (!ent) + return ERR_PTR(err); + + INIT_LIST_HEAD(&ent->list); + + ent->queue = queue; + ent->headers = iov[0].iov_base; + ent->payload = iov[1].iov_base; + + atomic_inc(&ring->queue_refs); + return ent; +} + +/* + * Register header and payload buffer with the kernel and puts the + * entry as "ready to get fuse requests" on the queue + */ +static int fuse_uring_register(struct io_uring_cmd *cmd, + unsigned int issue_flags, struct fuse_conn *fc) +{ + const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe); + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent; + int err; + unsigned int qid = READ_ONCE(cmd_req->qid); + + err = -ENOMEM; + if (!ring) { + ring = fuse_uring_create(fc); + if (!ring) + return err; + } + + if (qid >= ring->nr_queues) { + pr_info_ratelimited("fuse: Invalid ring qid %u\n", qid); + return -EINVAL; + } + + queue = ring->queues[qid]; + if (!queue) { + queue = fuse_uring_create_queue(ring, qid); + if (!queue) + return err; + } + + /* + * The created queue above does not need to be destructed in + * case of entry errors below, will be done at ring destruction time. 
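For orientation, the userspace counterpart of this registration could look roughly like the sketch below: the daemon creates an io_uring with IORING_SETUP_SQE128 and, for every ring entry of a queue, submits a FUSE_IO_URING_CMD_REGISTER SQE whose addr/len describe an iovec pair of header buffer and payload buffer (at least the connection's max_payload_sz computed above), with the fuse_uring_cmd_req naming the qid. Only the fuse structure and command names are taken from this patch; the rest is illustrative and not the libfuse implementation.

#include <liburing.h>
#include <linux/fuse.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Sketch: register one ring entry for queue "qid" over io_uring.
 * Requires a ring created with IORING_SETUP_SQE128; error handling omitted. */
static int fuse_uring_register_ent(struct io_uring *ring, int fuse_fd,
                                   unsigned int qid, size_t max_payload_sz,
                                   struct iovec iov[2])
{
        struct io_uring_sqe *sqe;
        struct fuse_uring_cmd_req *req;

        /* iov[0]: request/reply headers, iov[1]: payload; both stay mapped
         * for the lifetime of the ring entry */
        iov[0].iov_base = calloc(1, sizeof(struct fuse_uring_req_header));
        iov[0].iov_len  = sizeof(struct fuse_uring_req_header);
        iov[1].iov_base = calloc(1, max_payload_sz);
        iov[1].iov_len  = max_payload_sz;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_rw(IORING_OP_URING_CMD, sqe, fuse_fd, iov, 2, 0);
        sqe->cmd_op = FUSE_IO_URING_CMD_REGISTER;

        req = (struct fuse_uring_cmd_req *)sqe->cmd;
        memset(req, 0, sizeof(*req));
        req->qid = qid;         /* queue id corresponds to the CPU core in this patch */

        return io_uring_submit(ring);
}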
+ */ + + ent = fuse_uring_create_ring_ent(cmd, queue); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + fuse_uring_do_register(ent, cmd, issue_flags); + + return 0; +} + +/* + * Entry function from io_uring to handle the given passthrough command + * (op code IORING_OP_URING_CMD) + */ +int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) +{ + struct fuse_dev *fud; + struct fuse_conn *fc; + u32 cmd_op = cmd->cmd_op; + int err; + + if ((unlikely(issue_flags & IO_URING_F_CANCEL))) { + fuse_uring_cancel(cmd, issue_flags); + return 0; + } + + /* This extra SQE size holds struct fuse_uring_cmd_req */ + if (!(issue_flags & IO_URING_F_SQE128)) + return -EINVAL; + + fud = fuse_get_dev(cmd->file); + if (!fud) { + pr_info_ratelimited("No fuse device found\n"); + return -ENOTCONN; + } + fc = fud->fc; + + /* Once a connection has io-uring enabled on it, it can't be disabled */ + if (!enable_uring && !fc->io_uring) { + pr_info_ratelimited("fuse-io-uring is disabled\n"); + return -EOPNOTSUPP; + } + + if (fc->aborted) + return -ECONNABORTED; + if (!fc->connected) + return -ENOTCONN; + + /* + * fuse_uring_register() needs the ring to be initialized, + * we need to know the max payload size + */ + if (!fc->initialized) + return -EAGAIN; + + switch (cmd_op) { + case FUSE_IO_URING_CMD_REGISTER: + err = fuse_uring_register(cmd, issue_flags, fc); + if (err) { + pr_info_once("FUSE_IO_URING_CMD_REGISTER failed err=%d\n", + err); + fc->io_uring = 0; + wake_up_all(&fc->blocked_waitq); + return err; + } + break; + case FUSE_IO_URING_CMD_COMMIT_AND_FETCH: + err = fuse_uring_commit_fetch(cmd, issue_flags, fc); + if (err) { + pr_info_once("FUSE_IO_URING_COMMIT_AND_FETCH failed err=%d\n", + err); + return err; + } + break; + default: + return -EINVAL; + } + + return -EIOCBQUEUED; +} + +static void fuse_uring_send(struct fuse_ring_ent *ent, struct io_uring_cmd *cmd, + ssize_t ret, unsigned int issue_flags) +{ + struct fuse_ring_queue *queue = ent->queue; + + spin_lock(&queue->lock); + ent->state = FRRS_USERSPACE; + list_move(&ent->list, &queue->ent_in_userspace); + ent->cmd = NULL; + spin_unlock(&queue->lock); + + io_uring_cmd_done(cmd, ret, 0, issue_flags); +} + +/* + * This prepares and sends the ring request in fuse-uring task context. + * User buffers are not mapped yet - the application does not have permission + * to write to it - this has to be executed in ring task context. 
+ */ +static void fuse_uring_send_in_task(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + struct fuse_ring_ent *ent = uring_cmd_to_ring_ent(cmd); + struct fuse_ring_queue *queue = ent->queue; + int err; + + if (!(issue_flags & IO_URING_F_TASK_DEAD)) { + err = fuse_uring_prepare_send(ent, ent->fuse_req); + if (err) { + fuse_uring_next_fuse_req(ent, queue, issue_flags); + return; + } + } else { + err = -ECANCELED; + } + + fuse_uring_send(ent, cmd, err, issue_flags); +} + +static struct fuse_ring_queue *fuse_uring_task_to_queue(struct fuse_ring *ring) +{ + unsigned int qid; + struct fuse_ring_queue *queue; + + qid = task_cpu(current); + + if (WARN_ONCE(qid >= ring->nr_queues, + "Core number (%u) exceeds nr queues (%zu)\n", qid, + ring->nr_queues)) + qid = 0; + + queue = ring->queues[qid]; + WARN_ONCE(!queue, "Missing queue for qid %d\n", qid); + + return queue; +} + +static void fuse_uring_dispatch_ent(struct fuse_ring_ent *ent) +{ + struct io_uring_cmd *cmd = ent->cmd; + + uring_cmd_set_ring_ent(cmd, ent); + io_uring_cmd_complete_in_task(cmd, fuse_uring_send_in_task); +} + +/* queue a fuse request and send it if a ring entry is available */ +void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req) +{ + struct fuse_conn *fc = req->fm->fc; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent = NULL; + int err; + + err = -EINVAL; + queue = fuse_uring_task_to_queue(ring); + if (!queue) + goto err; + + if (req->in.h.opcode != FUSE_NOTIFY_REPLY) + req->in.h.unique = fuse_get_unique(fiq); + + spin_lock(&queue->lock); + err = -ENOTCONN; + if (unlikely(queue->stopped)) + goto err_unlock; + + ent = list_first_entry_or_null(&queue->ent_avail_queue, + struct fuse_ring_ent, list); + if (ent) + fuse_uring_add_req_to_ring_ent(ent, req); + else + list_add_tail(&req->list, &queue->fuse_req_queue); + spin_unlock(&queue->lock); + + if (ent) + fuse_uring_dispatch_ent(ent); + + return; + +err_unlock: + spin_unlock(&queue->lock); +err: + req->out.h.error = err; + clear_bit(FR_PENDING, &req->flags); + fuse_request_end(req); +} + +bool fuse_uring_queue_bq_req(struct fuse_req *req) +{ + struct fuse_conn *fc = req->fm->fc; + struct fuse_ring *ring = fc->ring; + struct fuse_ring_queue *queue; + struct fuse_ring_ent *ent = NULL; + + queue = fuse_uring_task_to_queue(ring); + if (!queue) + return false; + + spin_lock(&queue->lock); + if (unlikely(queue->stopped)) { + spin_unlock(&queue->lock); + return false; + } + + list_add_tail(&req->list, &queue->fuse_req_bg_queue); + + ent = list_first_entry_or_null(&queue->ent_avail_queue, + struct fuse_ring_ent, list); + spin_lock(&fc->bg_lock); + fc->num_background++; + if (fc->num_background == fc->max_background) + fc->blocked = 1; + fuse_uring_flush_bg(queue); + spin_unlock(&fc->bg_lock); + + /* + * Due to bg_queue flush limits there might be other bg requests + * in the queue that need to be handled first. Or no further req + * might be available. 
+ */ + req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, + list); + if (ent && req) { + fuse_uring_add_req_to_ring_ent(ent, req); + spin_unlock(&queue->lock); + + fuse_uring_dispatch_ent(ent); + } else { + spin_unlock(&queue->lock); + } + + return true; +} + +static const struct fuse_iqueue_ops fuse_io_uring_ops = { + /* should be send over io-uring as enhancement */ + .send_forget = fuse_dev_queue_forget, + + /* + * could be send over io-uring, but interrupts should be rare, + * no need to make the code complex + */ + .send_interrupt = fuse_dev_queue_interrupt, + .send_req = fuse_uring_queue_fuse_req, +}; diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h new file mode 100644 index 000000000000..2102b3d0c1ae --- /dev/null +++ b/fs/fuse/dev_uring_i.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * FUSE: Filesystem in Userspace + * Copyright (c) 2023-2024 DataDirect Networks. + */ + +#ifndef _FS_FUSE_DEV_URING_I_H +#define _FS_FUSE_DEV_URING_I_H + +#include "fuse_i.h" + +#ifdef CONFIG_FUSE_IO_URING + +#define FUSE_URING_TEARDOWN_TIMEOUT (5 * HZ) +#define FUSE_URING_TEARDOWN_INTERVAL (HZ/20) + +enum fuse_ring_req_state { + FRRS_INVALID = 0, + + /* The ring entry received from userspace and it is being processed */ + FRRS_COMMIT, + + /* The ring entry is waiting for new fuse requests */ + FRRS_AVAILABLE, + + /* The ring entry got assigned a fuse req */ + FRRS_FUSE_REQ, + + /* The ring entry is in or on the way to user space */ + FRRS_USERSPACE, + + /* The ring entry is in teardown */ + FRRS_TEARDOWN, + + /* The ring entry is released, but not freed yet */ + FRRS_RELEASED, +}; + +/** A fuse ring entry, part of the ring queue */ +struct fuse_ring_ent { + /* userspace buffer */ + struct fuse_uring_req_header __user *headers; + void __user *payload; + + /* the ring queue that owns the request */ + struct fuse_ring_queue *queue; + + /* fields below are protected by queue->lock */ + + struct io_uring_cmd *cmd; + + struct list_head list; + + enum fuse_ring_req_state state; + + struct fuse_req *fuse_req; +}; + +struct fuse_ring_queue { + /* + * back pointer to the main fuse uring structure that holds this + * queue + */ + struct fuse_ring *ring; + + /* queue id, corresponds to the cpu core */ + unsigned int qid; + + /* + * queue lock, taken when any value in the queue changes _and_ also + * a ring entry state changes. 
+ */ + spinlock_t lock; + + /* available ring entries (struct fuse_ring_ent) */ + struct list_head ent_avail_queue; + + /* + * entries in the process of being committed or in the process + * to be sent to userspace + */ + struct list_head ent_w_req_queue; + struct list_head ent_commit_queue; + + /* entries in userspace */ + struct list_head ent_in_userspace; + + /* entries that are released */ + struct list_head ent_released; + + /* fuse requests waiting for an entry slot */ + struct list_head fuse_req_queue; + + /* background fuse requests */ + struct list_head fuse_req_bg_queue; + + struct fuse_pqueue fpq; + + unsigned int active_background; + + bool stopped; +}; + +/** + * Describes if uring is for communication and holds alls the data needed + * for uring communication + */ +struct fuse_ring { + /* back pointer */ + struct fuse_conn *fc; + + /* number of ring queues */ + size_t nr_queues; + + /* maximum payload/arg size */ + size_t max_payload_sz; + + struct fuse_ring_queue **queues; + + /* + * Log ring entry states on stop when entries cannot be released + */ + unsigned int stop_debug_log : 1; + + wait_queue_head_t stop_waitq; + + /* async tear down */ + struct delayed_work async_teardown_work; + + /* log */ + unsigned long teardown_time; + + atomic_t queue_refs; + + bool ready; +}; + +bool fuse_uring_enabled(void); +void fuse_uring_destruct(struct fuse_conn *fc); +void fuse_uring_stop_queues(struct fuse_ring *ring); +void fuse_uring_abort_end_requests(struct fuse_ring *ring); +int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags); +void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req); +bool fuse_uring_queue_bq_req(struct fuse_req *req); + +static inline void fuse_uring_abort(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + + if (ring == NULL) + return; + + if (atomic_read(&ring->queue_refs) > 0) { + fuse_uring_abort_end_requests(ring); + fuse_uring_stop_queues(ring); + } +} + +static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc) +{ + struct fuse_ring *ring = fc->ring; + + if (ring) + wait_event(ring->stop_waitq, + atomic_read(&ring->queue_refs) == 0); +} + +static inline bool fuse_uring_ready(struct fuse_conn *fc) +{ + return fc->ring && fc->ring->ready; +} + +#else /* CONFIG_FUSE_IO_URING */ + +struct fuse_ring; + +static inline void fuse_uring_create(struct fuse_conn *fc) +{ +} + +static inline void fuse_uring_destruct(struct fuse_conn *fc) +{ +} + +static inline bool fuse_uring_enabled(void) +{ + return false; +} + +static inline void fuse_uring_abort(struct fuse_conn *fc) +{ +} + +static inline void fuse_uring_wait_stopped_queues(struct fuse_conn *fc) +{ +} + +static inline bool fuse_uring_ready(struct fuse_conn *fc) +{ + return false; +} + +#endif /* CONFIG_FUSE_IO_URING */ + +#endif /* _FS_FUSE_DEV_URING_I_H */ diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index e540d05549ff..198862b086ff 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -175,9 +175,12 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args, memset(outarg, 0, sizeof(struct fuse_entry_out)); args->opcode = FUSE_LOOKUP; args->nodeid = nodeid; - args->in_numargs = 1; - args->in_args[0].size = name->len + 1; - args->in_args[0].value = name->name; + args->in_numargs = 3; + fuse_set_zero_arg0(args); + args->in_args[1].size = name->len; + args->in_args[1].value = name->name; + args->in_args[2].size = 1; + args->in_args[2].value = ""; args->out_numargs = 1; args->out_args[0].size = sizeof(struct fuse_entry_out); 
args->out_args[0].value = outarg; @@ -192,10 +195,10 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args, * the lookup once more. If the lookup results in the same inode, * then refresh the attributes, timeouts and mark the dentry valid. */ -static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) +static int fuse_dentry_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *entry, unsigned int flags) { struct inode *inode; - struct dentry *parent; struct fuse_mount *fm; struct fuse_inode *fi; int ret; @@ -227,11 +230,9 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) attr_version = fuse_get_attr_version(fm->fc); - parent = dget_parent(entry); - fuse_lookup_init(fm->fc, &args, get_node_id(d_inode(parent)), - &entry->d_name, &outarg); + fuse_lookup_init(fm->fc, &args, get_node_id(dir), + name, &outarg); ret = fuse_simple_request(fm, &args); - dput(parent); /* Zero nodeid is same as -ENOENT */ if (!ret && !outarg.nodeid) ret = -ENOENT; @@ -265,9 +266,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) if (test_bit(FUSE_I_INIT_RDPLUS, &fi->state)) return -ECHILD; } else if (test_and_clear_bit(FUSE_I_INIT_RDPLUS, &fi->state)) { - parent = dget_parent(entry); - fuse_advise_use_readdirplus(d_inode(parent)); - dput(parent); + fuse_advise_use_readdirplus(dir); } } ret = 1; @@ -467,29 +466,29 @@ static int get_security_context(struct dentry *entry, umode_t mode, { struct fuse_secctx *fctx; struct fuse_secctx_header *header; - void *ctx = NULL, *ptr; - u32 ctxlen, total_len = sizeof(*header); + struct lsm_context lsmctx = { }; + void *ptr; + u32 total_len = sizeof(*header); int err, nr_ctx = 0; - const char *name; + const char *name = NULL; size_t namelen; err = security_dentry_init_security(entry, mode, &entry->d_name, - &name, &ctx, &ctxlen); - if (err) { - if (err != -EOPNOTSUPP) - goto out_err; - /* No LSM is supporting this security hook. 
Ignore error */ - ctxlen = 0; - ctx = NULL; - } + &name, &lsmctx); - if (ctxlen) { + /* If no LSM is supporting this security hook ignore error */ + if (err && err != -EOPNOTSUPP) + goto out_err; + + if (lsmctx.len) { nr_ctx = 1; namelen = strlen(name) + 1; err = -EIO; - if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX)) + if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || + lsmctx.len > S32_MAX)) goto out_err; - total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen); + total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + + lsmctx.len); } err = -ENOMEM; @@ -502,19 +501,20 @@ static int get_security_context(struct dentry *entry, umode_t mode, ptr += sizeof(*header); if (nr_ctx) { fctx = ptr; - fctx->size = ctxlen; + fctx->size = lsmctx.len; ptr += sizeof(*fctx); strcpy(ptr, name); ptr += namelen; - memcpy(ptr, ctx, ctxlen); + memcpy(ptr, lsmctx.context, lsmctx.len); } ext->size = total_len; ext->value = header; err = 0; out_err: - kfree(ctx); + if (nr_ctx) + security_release_secctx(&lsmctx); return err; } @@ -928,11 +928,12 @@ static int fuse_symlink(struct mnt_idmap *idmap, struct inode *dir, FUSE_ARGS(args); args.opcode = FUSE_SYMLINK; - args.in_numargs = 2; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; - args.in_args[1].size = len; - args.in_args[1].value = link; + args.in_numargs = 3; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; + args.in_args[2].size = len; + args.in_args[2].value = link; return create_new_entry(idmap, fm, &args, dir, entry, S_IFLNK); } @@ -992,9 +993,10 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry) args.opcode = FUSE_UNLINK; args.nodeid = get_node_id(dir); - args.in_numargs = 1; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); @@ -1015,9 +1017,10 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) args.opcode = FUSE_RMDIR; args.nodeid = get_node_id(dir); - args.in_numargs = 1; - args.in_args[0].size = entry->d_name.len + 1; - args.in_args[0].value = entry->d_name.name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = entry->d_name.len + 1; + args.in_args[1].value = entry->d_name.name; err = fuse_simple_request(fm, &args); if (!err) { fuse_dir_changed(dir); diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h new file mode 100644 index 000000000000..3b2bfe1248d3 --- /dev/null +++ b/fs/fuse/fuse_dev_i.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * FUSE: Filesystem in Userspace + * Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu> + */ +#ifndef _FS_FUSE_DEV_I_H +#define _FS_FUSE_DEV_I_H + +#include <linux/types.h> + +/* Ordinary requests have even IDs, while interrupts IDs are odd */ +#define FUSE_INT_REQ_BIT (1ULL << 0) +#define FUSE_REQ_ID_STEP (1ULL << 1) + +struct fuse_arg; +struct fuse_args; +struct fuse_pqueue; +struct fuse_req; +struct fuse_iqueue; +struct fuse_forget_link; + +struct fuse_copy_state { + int write; + struct fuse_req *req; + struct iov_iter *iter; + struct pipe_buffer *pipebufs; + struct pipe_buffer *currbuf; + struct pipe_inode_info *pipe; + unsigned long nr_segs; + struct page *pg; + unsigned int len; + unsigned int offset; + unsigned int move_pages:1; + 
unsigned int is_uring:1; + struct { + unsigned int copied_sz; /* copied size into the user buffer */ + } ring; +}; + +static inline struct fuse_dev *fuse_get_dev(struct file *file) +{ + /* + * Lockless access is OK, because file->private data is set + * once during mount and is valid until the file is released. + */ + return READ_ONCE(file->private_data); +} + +unsigned int fuse_req_hash(u64 unique); +struct fuse_req *fuse_request_find(struct fuse_pqueue *fpq, u64 unique); + +void fuse_dev_end_requests(struct list_head *head); + +void fuse_copy_init(struct fuse_copy_state *cs, int write, + struct iov_iter *iter); +int fuse_copy_args(struct fuse_copy_state *cs, unsigned int numargs, + unsigned int argpages, struct fuse_arg *args, + int zeroing); +int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args, + unsigned int nbytes); +void fuse_dev_queue_forget(struct fuse_iqueue *fiq, + struct fuse_forget_link *forget); +void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req); + +#endif + diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 74744c6f2860..fee96fe7887b 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -310,7 +310,7 @@ struct fuse_args { bool is_ext:1; bool is_pinned:1; bool invalidate_vmap:1; - struct fuse_in_arg in_args[3]; + struct fuse_in_arg in_args[4]; struct fuse_arg out_args[2]; void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error); /* Used for kvec iter backed by vmalloc address */ @@ -438,6 +438,10 @@ struct fuse_req { /** fuse_mount this request belongs to */ struct fuse_mount *fm; + +#ifdef CONFIG_FUSE_IO_URING + void *ring_entry; +#endif }; struct fuse_iqueue; @@ -863,6 +867,9 @@ struct fuse_conn { /* Use pages instead of pointer for kernel I/O */ unsigned int use_pages_for_kvec_io:1; + /* Use io_uring for communication */ + unsigned int io_uring; + /** Maximum stack depth for passthrough backing files */ int max_stack_depth; @@ -923,6 +930,11 @@ struct fuse_conn { /** IDR for backing files ids */ struct idr backing_files_map; #endif + +#ifdef CONFIG_FUSE_IO_URING + /** uring connection information*/ + struct fuse_ring *ring; +#endif }; /* @@ -947,6 +959,19 @@ struct fuse_mount { struct rcu_head rcu; }; +/* + * Empty header for FUSE opcodes without specific header needs. + * Used as a placeholder in args->in_args[0] for consistency + * across all FUSE operations, simplifying request handling. 
+ */ +struct fuse_zero_header {}; + +static inline void fuse_set_zero_arg0(struct fuse_args *args) +{ + args->in_args[0].size = sizeof(struct fuse_zero_header); + args->in_args[0].value = NULL; +} + static inline struct fuse_mount *get_fuse_mount_super(struct super_block *sb) { return sb->s_fs_info; @@ -1220,6 +1245,11 @@ void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc); /** + * Initialize the fuse processing queue + */ +void fuse_pqueue_init(struct fuse_pqueue *fpq); + +/** * Initialize fuse_conn */ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm, diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 3ce4f4e81d09..e9db2cb8c150 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -7,6 +7,7 @@ */ #include "fuse_i.h" +#include "dev_uring_i.h" #include <linux/pagemap.h> #include <linux/slab.h> @@ -937,7 +938,7 @@ static void fuse_iqueue_init(struct fuse_iqueue *fiq, fiq->priv = priv; } -static void fuse_pqueue_init(struct fuse_pqueue *fpq) +void fuse_pqueue_init(struct fuse_pqueue *fpq) { unsigned int i; @@ -992,6 +993,8 @@ static void delayed_release(struct rcu_head *p) { struct fuse_conn *fc = container_of(p, struct fuse_conn, rcu); + fuse_uring_destruct(fc); + put_user_ns(fc->user_ns); fc->release(fc); } @@ -1387,6 +1390,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, else ok = false; } + if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled()) + fc->io_uring = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1446,6 +1451,13 @@ void fuse_send_init(struct fuse_mount *fm) if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH)) flags |= FUSE_PASSTHROUGH; + /* + * This is just an information flag for fuse server. No need to check + * the reply - server is either sending IORING_OP_URING_CMD or not. 
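On the daemon side the corresponding decision point is the FUSE_INIT request: when the kernel advertises FUSE_OVER_IO_URING there (and the enable_uring module parameter allows it), the server may switch to IORING_OP_URING_CMD instead of read/write on /dev/fuse. A hedged sketch of that check, assuming the 64-bit flag word is reassembled the same way the kernel splits it above (flags2 carries bits 32 and up when FUSE_INIT_EXT is set); the helper name is illustrative:

#include <linux/fuse.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch: decide at FUSE_INIT time whether the kernel offers the
 * io-uring transport.  "init_in" is the fuse_init_in body of the INIT
 * request read from /dev/fuse. */
static bool kernel_offers_io_uring(const struct fuse_init_in *init_in)
{
        uint64_t flags = init_in->flags;

        /* flags2 carries the upper 32 bits when FUSE_INIT_EXT is set */
        if (flags & FUSE_INIT_EXT)
                flags |= (uint64_t)init_in->flags2 << 32;

        return flags & FUSE_OVER_IO_URING;
}

Whether the kernel accepts the resulting uring commands at all is still governed by the enable_uring module parameter introduced earlier in this patch.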
+ */ + if (fuse_uring_enabled()) + flags |= FUSE_OVER_IO_URING; + ia->in.flags = flags; ia->in.flags2 = flags >> 32; diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c index b272bb333005..63fb1e5bee30 100644 --- a/fs/fuse/sysctl.c +++ b/fs/fuse/sysctl.c @@ -13,7 +13,7 @@ static struct ctl_table_header *fuse_table_header; /* Bound by fuse_init_out max_pages, which is a u16 */ static unsigned int sysctl_fuse_max_pages_limit = 65535; -static struct ctl_table fuse_sysctl_table[] = { +static const struct ctl_table fuse_sysctl_table[] = { { .procname = "max_pages_limit", .data = &fuse_max_pages_limit, diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 9f568d345c51..93dfb06b6cea 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -164,9 +164,10 @@ int fuse_removexattr(struct inode *inode, const char *name) args.opcode = FUSE_REMOVEXATTR; args.nodeid = get_node_id(inode); - args.in_numargs = 1; - args.in_args[0].size = strlen(name) + 1; - args.in_args[0].value = name; + args.in_numargs = 2; + fuse_set_zero_arg0(&args); + args.in_args[1].size = strlen(name) + 1; + args.in_args[1].value = name; err = fuse_simple_request(fm, &args); if (err == -ENOSYS) { fm->fc->no_removexattr = 1; diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 2e215e8c3c88..95050e719233 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c @@ -21,7 +21,9 @@ /** * gfs2_drevalidate - Check directory lookup consistency - * @dentry: the mapping to check + * @dir: expected parent directory inode + * @name: expexted name + * @dentry: dentry to check * @flags: lookup flags * * Check to make sure the lookup necessary to arrive at this inode from its @@ -30,50 +32,43 @@ * Returns: 1 if the dentry is ok, 0 if it isn't */ -static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) +static int gfs2_drevalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - struct dentry *parent; - struct gfs2_sbd *sdp; - struct gfs2_inode *dip; + struct gfs2_sbd *sdp = GFS2_SB(dir); + struct gfs2_inode *dip = GFS2_I(dir); struct inode *inode; struct gfs2_holder d_gh; struct gfs2_inode *ip = NULL; - int error, valid = 0; + int error, valid; int had_lock = 0; if (flags & LOOKUP_RCU) return -ECHILD; - parent = dget_parent(dentry); - sdp = GFS2_SB(d_inode(parent)); - dip = GFS2_I(d_inode(parent)); inode = d_inode(dentry); if (inode) { if (is_bad_inode(inode)) - goto out; + return 0; ip = GFS2_I(inode); } - if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) { - valid = 1; - goto out; - } + if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) + return 1; had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); if (!had_lock) { error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); if (error) - goto out; + return 0; } - error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip); + error = gfs2_dir_check(dir, name, ip); valid = inode ? 
!error : (error == -ENOENT); if (!had_lock) gfs2_glock_dq_uninit(&d_gh); -out: - dput(parent); return valid; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 1e73cf87ff88..c9bb3be21d2b 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask) error = filemap_fdatawait(inode->i_mapping); if (error) goto out; + truncate_inode_pages(inode->i_mapping, 0); if (new_flags & GFS2_DIF_JDATA) gfs2_ordered_del_inode(ip); } diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 8c4c1f871a88..65c07aa95718 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1201,8 +1201,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, if (glops->go_instantiate) gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED); gl->gl_name = name; + lockref_init(&gl->gl_lockref); lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); - gl->gl_lockref.count = 1; gl->gl_state = LM_ST_UNLOCKED; gl->gl_target = LM_ST_UNLOCKED; gl->gl_demote_state = LM_ST_EXCLUSIVE; diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 04cadc02e5a6..0727f60ad028 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -51,7 +51,6 @@ static void gfs2_init_glock_once(void *foo) { struct gfs2_glock *gl = foo; - spin_lock_init(&gl->gl_lockref.lock); INIT_LIST_HEAD(&gl->gl_holders); INIT_LIST_HEAD(&gl->gl_lru); INIT_LIST_HEAD(&gl->gl_ail_list); diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 72b48f6f5561..2298e06797ac 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -236,8 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str return NULL; qd->qd_sbd = sdp; - qd->qd_lockref.count = 0; - spin_lock_init(&qd->qd_lockref.lock); + lockref_init(&qd->qd_lockref); qd->qd_id = qid; qd->qd_slot = -1; INIT_LIST_HEAD(&qd->qd_lru); @@ -298,7 +297,6 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, spin_lock_bucket(hash); *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid); if (qd == NULL) { - new_qd->qd_lockref.count++; *qdp = new_qd; list_add(&new_qd->qd_list, &sdp->sd_quota_list); hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); @@ -1451,6 +1449,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) if (qd == NULL) goto fail_brelse; + qd->qd_lockref.count = 0; set_bit(QDF_CHANGE, &qd->qd_flags); qd->qd_change = qc_change; qd->qd_slot = slot; diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index f462d9cb3087..988f38dc5b2c 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -44,8 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, int ret; ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ - if (capable(CAP_SYS_RESOURCE) || - sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF || + capable(CAP_SYS_RESOURCE)) return 0; ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); if (ret) diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c index 76fa02e3835b..ef54fc8093cf 100644 --- a/fs/hfs/sysdep.c +++ b/fs/hfs/sysdep.c @@ -13,7 +13,8 @@ /* dentry case-handling: just lowercase everything */ -static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags) +static int hfs_revalidate_dentry(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct inode *inode; int diff; diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 7e51d2cec64b..e0741e468956 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -95,32 +95,17 @@ __uml_setup("hostfs=", hostfs_args, static char 
*__dentry_name(struct dentry *dentry, char *name) { char *p = dentry_path_raw(dentry, name, PATH_MAX); - char *root; - size_t len; - struct hostfs_fs_info *fsi; - - fsi = dentry->d_sb->s_fs_info; - root = fsi->host_root_path; - len = strlen(root); - if (IS_ERR(p)) { - __putname(name); - return NULL; - } + struct hostfs_fs_info *fsi = dentry->d_sb->s_fs_info; + char *root = fsi->host_root_path; + size_t len = strlen(root); - /* - * This function relies on the fact that dentry_path_raw() will place - * the path name at the end of the provided buffer. - */ - BUG_ON(p + strlen(p) + 1 != name + PATH_MAX); - - strscpy(name, root, PATH_MAX); - if (len > p - name) { + if (IS_ERR(p) || len > p - name) { __putname(name); return NULL; } - if (p > name + len) - strcpy(name + len, p); + memcpy(name, root, len); + memmove(name + len, p, name + PATH_MAX - p); return name; } @@ -410,38 +395,33 @@ static const struct file_operations hostfs_dir_fops = { .fsync = hostfs_fsync, }; -static int hostfs_writepage(struct page *page, struct writeback_control *wbc) +static int hostfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) { - struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; - char *buffer; - loff_t base = page_offset(page); - int count = PAGE_SIZE; - int end_index = inode->i_size >> PAGE_SHIFT; - int err; - - if (page->index >= end_index) - count = inode->i_size & (PAGE_SIZE-1); - - buffer = kmap_local_page(page); - - err = write_file(HOSTFS_I(inode)->fd, &base, buffer, count); - if (err != count) { - if (err >= 0) - err = -EIO; - mapping_set_error(mapping, err); - goto out; + struct folio *folio = NULL; + loff_t i_size = i_size_read(inode); + int err = 0; + + while ((folio = writeback_iter(mapping, wbc, folio, &err))) { + loff_t pos = folio_pos(folio); + size_t count = folio_size(folio); + char *buffer; + int ret; + + if (count > i_size - pos) + count = i_size - pos; + + buffer = kmap_local_folio(folio, 0); + ret = write_file(HOSTFS_I(inode)->fd, &pos, buffer, count); + kunmap_local(buffer); + folio_unlock(folio); + if (ret != count) { + err = ret < 0 ? 
ret : -EIO; + mapping_set_error(mapping, err); + } } - if (base > inode->i_size) - inode->i_size = base; - - err = 0; - - out: - kunmap_local(buffer); - unlock_page(page); - return err; } @@ -506,11 +486,12 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, } static const struct address_space_operations hostfs_aops = { - .writepage = hostfs_writepage, + .writepages = hostfs_writepages, .read_folio = hostfs_read_folio, .dirty_folio = filemap_dirty_folio, .write_begin = hostfs_write_begin, .write_end = hostfs_write_end, + .migrate_folio = filemap_migrate_folio, }; static int hostfs_inode_update(struct inode *ino, const struct hostfs_stat *st) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index fc1ae5132127..0fc179a59830 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -99,7 +99,6 @@ static const struct fs_parameter_spec hugetlb_fs_parameters[] = { static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); - struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); @@ -116,10 +115,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); vma->vm_ops = &hugetlb_vm_ops; - ret = seal_check_write(info->seals, vma); - if (ret) - return ret; - /* * page based offset in vm_pgoff could be sufficiently large to * overflow a loff_t when converted to byte offset. This can @@ -819,7 +814,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, * folios in these areas, we need to consume the reserves * to keep reservation accounting consistent. */ - folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0); + folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); if (IS_ERR(folio)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); error = PTR_ERR(folio); diff --git a/fs/inode.c b/fs/inode.c index 6b4c77268fc0..5587aabdaa5e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -184,7 +184,7 @@ static int proc_nr_inodes(const struct ctl_table *table, int write, void *buffer return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table inodes_sysctls[] = { +static const struct ctl_table inodes_sysctls[] = { { .procname = "inode-nr", .data = &inodes_stat, diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c index 5fc0ac36dee3..b90d0eda9e51 100644 --- a/fs/iomap/swapfile.c +++ b/fs/iomap/swapfile.c @@ -189,7 +189,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis, *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; sis->max = isi.nr_pages; sis->pages = isi.nr_pages - 1; - sis->highest_bit = isi.nr_pages - 1; return isi.nr_extents; } EXPORT_SYMBOL_GPL(iomap_swapfile_activate); diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index 34d5baa5d88a..5f3b6da0e022 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c @@ -301,7 +301,6 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, */ static int zisofs_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; int err; @@ -311,16 +310,15 @@ static int zisofs_read_folio(struct file *file, struct folio *folio) PAGE_SHIFT <= zisofs_block_shift ? 
(1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; struct page **pages; - pgoff_t index = page->index, end_index; + pgoff_t index = folio->index, end_index; end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* - * If this page is wholly outside i_size we just return zero; + * If this folio is wholly outside i_size we just return zero; * do_generic_file_read() will handle this for us */ if (index >= end_index) { - SetPageUptodate(page); - unlock_page(page); + folio_end_read(folio, true); return 0; } @@ -338,10 +336,10 @@ static int zisofs_read_folio(struct file *file, struct folio *folio) pages = kcalloc(max_t(unsigned int, zisofs_pages_per_cblock, 1), sizeof(*pages), GFP_KERNEL); if (!pages) { - unlock_page(page); + folio_unlock(folio); return -ENOMEM; } - pages[full_page] = page; + pages[full_page] = &folio->page; for (i = 0; i < pcount; i++, index++) { if (i != full_page) diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig index 4ad2c67f93f1..9c19e1512101 100644 --- a/fs/jbd2/Kconfig +++ b/fs/jbd2/Kconfig @@ -2,8 +2,6 @@ config JBD2 tristate select CRC32 - select CRYPTO - select CRYPTO_CRC32C help This is a generic journaling layer for block devices that support both 32-bit and 64-bit block numbers. It is currently used by diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 7e49d912b091..d8084b31b361 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1369,20 +1369,12 @@ static int journal_check_superblock(journal_t *journal) return err; } - /* Load the checksum driver */ if (jbd2_journal_has_csum_v2or3_feature(journal)) { if (sb->s_checksum_type != JBD2_CRC32C_CHKSUM) { printk(KERN_ERR "JBD2: Unknown checksum type\n"); return err; } - journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); - if (IS_ERR(journal->j_chksum_driver)) { - printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); - err = PTR_ERR(journal->j_chksum_driver); - journal->j_chksum_driver = NULL; - return err; - } /* Check superblock checksum */ if (sb->s_checksum != jbd2_superblock_csum(journal, sb)) { printk(KERN_ERR "JBD2: journal checksum error\n"); @@ -1608,8 +1600,6 @@ static journal_t *journal_init_common(struct block_device *bdev, err_cleanup: percpu_counter_destroy(&journal->j_checkpoint_jh_count); - if (journal->j_chksum_driver) - crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_wbuf); jbd2_journal_destroy_revoke(journal); journal_fail_superblock(journal); @@ -2191,8 +2181,6 @@ int jbd2_journal_destroy(journal_t *journal) iput(journal->j_inode); if (journal->j_revoke) jbd2_journal_destroy_revoke(journal); - if (journal->j_chksum_driver) - crypto_free_shash(journal->j_chksum_driver); kfree(journal->j_fc_wbuf); kfree(journal->j_wbuf); kfree(journal); @@ -2337,27 +2325,15 @@ int jbd2_journal_set_features(journal_t *journal, unsigned long compat, } } - /* Load the checksum driver if necessary */ - if ((journal->j_chksum_driver == NULL) && - INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { - journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); - if (IS_ERR(journal->j_chksum_driver)) { - printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); - journal->j_chksum_driver = NULL; - return 0; - } - /* Precompute checksum seed for all metadata */ - journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, - sizeof(sb->s_uuid)); - } - lock_buffer(journal->j_sb_buffer); - /* If enabling v3 checksums, update superblock */ + /* If enabling v3 checksums, update superblock and precompute seed */ if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { sb->s_checksum_type = 
JBD2_CRC32C_CHKSUM; sb->s_feature_compat &= ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); + journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, + sizeof(sb->s_uuid)); } /* If enabling v1 checksums, downgrade superblock */ diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index d68a4e6ac345..fc8ede43afde 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1576,7 +1576,8 @@ out: return result; } -static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags) +static int jfs_ci_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { /* * This is not negative dentry. Always valid. diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 458519e416fe..5f0f8b95f44c 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -1109,7 +1109,8 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, return ERR_PTR(rc); } -static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags) +static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct kernfs_node *kn; struct kernfs_root *root; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 8502ef68459b..0eb320617d7b 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -927,7 +927,7 @@ repeat: if (!inode) continue; - name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); + name = QSTR(kn->name); parent = kernfs_get_parent(kn); if (parent) { p_inode = ilookup(info->sb, kernfs_ino(parent)); diff --git a/fs/libfs.c b/fs/libfs.c index 748ac5923154..8444f5cc4064 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -245,9 +245,16 @@ const struct inode_operations simple_dir_inode_operations = { }; EXPORT_SYMBOL(simple_dir_inode_operations); -/* 0 is '.', 1 is '..', so always start with offset 2 or more */ +/* simple_offset_add() never assigns these to a dentry */ enum { - DIR_OFFSET_MIN = 2, + DIR_OFFSET_FIRST = 2, /* Find first real entry */ + DIR_OFFSET_EOD = S32_MAX, +}; + +/* simple_offset_add() allocation range */ +enum { + DIR_OFFSET_MIN = DIR_OFFSET_FIRST + 1, + DIR_OFFSET_MAX = DIR_OFFSET_EOD - 1, }; static void offset_set(struct dentry *dentry, long offset) @@ -291,9 +298,10 @@ int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry) return -EBUSY; ret = mtree_alloc_cyclic(&octx->mt, &offset, dentry, DIR_OFFSET_MIN, - LONG_MAX, &octx->next_offset, GFP_KERNEL); - if (ret < 0) - return ret; + DIR_OFFSET_MAX, &octx->next_offset, + GFP_KERNEL); + if (unlikely(ret < 0)) + return ret == -EBUSY ? -ENOSPC : ret; offset_set(dentry, offset); return 0; @@ -330,38 +338,6 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry) } /** - * simple_offset_empty - Check if a dentry can be unlinked - * @dentry: dentry to be tested - * - * Returns 0 if @dentry is a non-empty directory; otherwise returns 1. 
- */ -int simple_offset_empty(struct dentry *dentry) -{ - struct inode *inode = d_inode(dentry); - struct offset_ctx *octx; - struct dentry *child; - unsigned long index; - int ret = 1; - - if (!inode || !S_ISDIR(inode->i_mode)) - return ret; - - index = DIR_OFFSET_MIN; - octx = inode->i_op->get_offset_ctx(inode); - mt_for_each(&octx->mt, child, index, LONG_MAX) { - spin_lock(&child->d_lock); - if (simple_positive(child)) { - spin_unlock(&child->d_lock); - ret = 0; - break; - } - spin_unlock(&child->d_lock); - } - - return ret; -} - -/** * simple_offset_rename - handle directory offsets for rename * @old_dir: parent directory of source entry * @old_dentry: dentry of source entry @@ -454,14 +430,6 @@ void simple_offset_destroy(struct offset_ctx *octx) mtree_destroy(&octx->mt); } -static int offset_dir_open(struct inode *inode, struct file *file) -{ - struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode); - - file->private_data = (void *)ctx->next_offset; - return 0; -} - /** * offset_dir_llseek - Advance the read position of a directory descriptor * @file: an open directory whose position is to be updated @@ -475,9 +443,6 @@ static int offset_dir_open(struct inode *inode, struct file *file) */ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence) { - struct inode *inode = file->f_inode; - struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode); - switch (whence) { case SEEK_CUR: offset += file->f_pos; @@ -490,62 +455,89 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence) return -EINVAL; } - /* In this case, ->private_data is protected by f_pos_lock */ - if (!offset) - file->private_data = (void *)ctx->next_offset; return vfs_setpos(file, offset, LONG_MAX); } -static struct dentry *offset_find_next(struct offset_ctx *octx, loff_t offset) +static struct dentry *find_positive_dentry(struct dentry *parent, + struct dentry *dentry, + bool next) { - MA_STATE(mas, &octx->mt, offset, offset); + struct dentry *found = NULL; + + spin_lock(&parent->d_lock); + if (next) + dentry = d_next_sibling(dentry); + else if (!dentry) + dentry = d_first_child(parent); + hlist_for_each_entry_from(dentry, d_sib) { + if (!simple_positive(dentry)) + continue; + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(dentry)) + found = dget_dlock(dentry); + spin_unlock(&dentry->d_lock); + if (likely(found)) + break; + } + spin_unlock(&parent->d_lock); + return found; +} + +static noinline_for_stack struct dentry * +offset_dir_lookup(struct dentry *parent, loff_t offset) +{ + struct inode *inode = d_inode(parent); + struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode); struct dentry *child, *found = NULL; - rcu_read_lock(); - child = mas_find(&mas, LONG_MAX); - if (!child) - goto out; - spin_lock(&child->d_lock); - if (simple_positive(child)) - found = dget_dlock(child); - spin_unlock(&child->d_lock); -out: - rcu_read_unlock(); + MA_STATE(mas, &octx->mt, offset, offset); + + if (offset == DIR_OFFSET_FIRST) + found = find_positive_dentry(parent, NULL, false); + else { + rcu_read_lock(); + child = mas_find(&mas, DIR_OFFSET_MAX); + found = find_positive_dentry(parent, child, false); + rcu_read_unlock(); + } return found; } static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry) { struct inode *inode = d_inode(dentry); - long offset = dentry2offset(dentry); - return ctx->actor(ctx, dentry->d_name.name, dentry->d_name.len, offset, - inode->i_ino, fs_umode_to_dtype(inode->i_mode)); + return dir_emit(ctx, 
dentry->d_name.name, dentry->d_name.len, + inode->i_ino, fs_umode_to_dtype(inode->i_mode)); } -static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index) +static void offset_iterate_dir(struct file *file, struct dir_context *ctx) { - struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode); + struct dentry *dir = file->f_path.dentry; struct dentry *dentry; + dentry = offset_dir_lookup(dir, ctx->pos); + if (!dentry) + goto out_eod; while (true) { - dentry = offset_find_next(octx, ctx->pos); - if (!dentry) - return; - - if (dentry2offset(dentry) >= last_index) { - dput(dentry); - return; - } + struct dentry *next; - if (!offset_dir_emit(ctx, dentry)) { - dput(dentry); - return; - } + ctx->pos = dentry2offset(dentry); + if (!offset_dir_emit(ctx, dentry)) + break; - ctx->pos = dentry2offset(dentry) + 1; + next = find_positive_dentry(dir, dentry, true); dput(dentry); + + if (!next) + goto out_eod; + dentry = next; } + dput(dentry); + return; + +out_eod: + ctx->pos = DIR_OFFSET_EOD; } /** @@ -565,6 +557,8 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon * * On return, @ctx->pos contains an offset that will read the next entry * in this directory when offset_readdir() is called again with @ctx. + * Caller places this value in the d_off field of the last entry in the + * user's buffer. * * Return values: * %0 - Complete @@ -572,19 +566,17 @@ static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, lon static int offset_readdir(struct file *file, struct dir_context *ctx) { struct dentry *dir = file->f_path.dentry; - long last_index = (long)file->private_data; lockdep_assert_held(&d_inode(dir)->i_rwsem); if (!dir_emit_dots(file, ctx)) return 0; - - offset_iterate_dir(d_inode(dir), ctx, last_index); + if (ctx->pos != DIR_OFFSET_EOD) + offset_iterate_dir(file, ctx); return 0; } const struct file_operations simple_offset_dir_operations = { - .open = offset_dir_open, .llseek = offset_dir_llseek, .iterate_shared = offset_readdir, .read = generic_read_dir, @@ -673,6 +665,7 @@ static int pseudo_fs_fill_super(struct super_block *s, struct fs_context *fc) s->s_blocksize_bits = PAGE_SHIFT; s->s_magic = ctx->magic; s->s_op = ctx->ops ?: &simple_super_operations; + s->s_export_op = ctx->eops; s->s_xattr = ctx->xattr; s->s_time_gran = 1; root = new_inode(s); @@ -1789,7 +1782,7 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, { const struct dentry *parent; const struct inode *dir; - char strbuf[DNAME_INLINE_LEN]; + union shortname_store strbuf; struct qstr qstr; /* @@ -1809,22 +1802,23 @@ int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, if (!dir || !IS_CASEFOLDED(dir)) return 1; + qstr.len = len; + qstr.name = str; /* * If the dentry name is stored in-line, then it may be concurrently * modified by a rename. If this happens, the VFS will eventually retry * the lookup, so it doesn't matter what ->d_compare() returns. * However, it's unsafe to call utf8_strncasecmp() with an unstable * string. Therefore, we have to copy the name into a temporary buffer. + * As above, len is guaranteed to match str, so the shortname case + * is exactly when str points to ->d_shortname. 
*/ - if (len <= DNAME_INLINE_LEN - 1) { - memcpy(strbuf, str, len); - strbuf[len] = 0; - str = strbuf; + if (qstr.name == dentry->d_shortname.string) { + strbuf = dentry->d_shortname; // NUL is guaranteed to be in there + qstr.name = strbuf.string; /* prevent compiler from optimizing out the temporary buffer */ barrier(); } - qstr.len = len; - qstr.name = str; return utf8_strncasecmp(dentry->d_sb->s_encoding, name, &qstr); } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 4ec22c2f2ea3..2c8eedc6c2cc 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -70,9 +70,6 @@ static unsigned long nlm_grace_period; unsigned long nlm_timeout = LOCKD_DFLT_TIMEO; static int nlm_udpport, nlm_tcpport; -/* RLIM_NOFILE defaults to 1024. That seems like a reasonable default here. */ -static unsigned int nlm_max_connections = 1024; - /* * Constants needed for the sysctl interface. */ @@ -136,9 +133,6 @@ lockd(void *vrqstp) * NFS mount or NFS daemon has gone away. */ while (!svc_thread_should_stop(rqstp)) { - /* update sv_maxconn if it has changed */ - rqstp->rq_server->sv_maxconn = nlm_max_connections; - nlmsvc_retry_blocked(rqstp); svc_recv(rqstp); } @@ -340,7 +334,6 @@ static int lockd_get(void) return -ENOMEM; } - serv->sv_maxconn = nlm_max_connections; error = svc_set_num_threads(serv, NULL, 1); if (error < 0) { svc_destroy(&serv); @@ -419,7 +412,7 @@ EXPORT_SYMBOL_GPL(lockd_down); * Sysctl parameters (same as module parameters, different interface). */ -static struct ctl_table nlm_sysctls[] = { +static const struct ctl_table nlm_sysctls[] = { { .procname = "nlm_grace_period", .data = &nlm_grace_period, @@ -542,7 +535,6 @@ module_param_call(nlm_udpport, param_set_port, param_get_int, module_param_call(nlm_tcpport, param_set_port, param_get_int, &nlm_tcpport, 0644); module_param(nsm_use_hostnames, bool, 0644); -module_param(nlm_max_connections, uint, 0644); static int lockd_init_net(struct net *net) { diff --git a/fs/locks.c b/fs/locks.c index 25afc8d9c9d1..1619cddfa7a4 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -97,7 +97,7 @@ static int leases_enable = 1; static int lease_break_time = 45; #ifdef CONFIG_SYSCTL -static struct ctl_table locks_sysctls[] = { +static const struct ctl_table locks_sysctls[] = { { .procname = "leases-enable", .data = &leases_enable, diff --git a/fs/mount.h b/fs/mount.h index 179f690a0c72..ffb613cdfeee 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -8,15 +8,23 @@ struct mnt_namespace { struct ns_common ns; struct mount * root; - struct rb_root mounts; /* Protected by namespace_sem */ + struct { + struct rb_root mounts; /* Protected by namespace_sem */ + struct rb_node *mnt_last_node; /* last (rightmost) mount in the rbtree */ + struct rb_node *mnt_first_node; /* first (leftmost) mount in the rbtree */ + }; struct user_namespace *user_ns; struct ucounts *ucounts; u64 seq; /* Sequence number to prevent loops */ - wait_queue_head_t poll; + union { + wait_queue_head_t poll; + struct rcu_head mnt_ns_rcu; + }; u64 event; unsigned int nr_mounts; /* # of mounts in the namespace */ unsigned int pending_mounts; struct rb_node mnt_ns_tree_node; /* node in the mnt_ns_tree */ + struct list_head mnt_ns_list; /* entry in the sequential list of mounts namespace */ refcount_t passive; /* number references not pinning @mounts */ } __randomize_layout; @@ -150,22 +158,21 @@ static inline bool mnt_ns_attached(const struct mount *mnt) static inline void move_from_ns(struct mount *mnt, struct list_head *dt_list) { + struct mnt_namespace *ns = mnt->mnt_ns; WARN_ON(!mnt_ns_attached(mnt)); - 
rb_erase(&mnt->mnt_node, &mnt->mnt_ns->mounts); + if (ns->mnt_last_node == &mnt->mnt_node) + ns->mnt_last_node = rb_prev(&mnt->mnt_node); + if (ns->mnt_first_node == &mnt->mnt_node) + ns->mnt_first_node = rb_next(&mnt->mnt_node); + rb_erase(&mnt->mnt_node, &ns->mounts); RB_CLEAR_NODE(&mnt->mnt_node); list_add_tail(&mnt->mnt_list, dt_list); } bool has_locked_children(struct mount *mnt, struct dentry *dentry); -struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mnt_ns, bool previous); -static inline struct mnt_namespace *lookup_next_mnt_ns(struct mnt_namespace *mntns) -{ - return __lookup_next_mnt_ns(mntns, false); -} -static inline struct mnt_namespace *lookup_prev_mnt_ns(struct mnt_namespace *mntns) -{ - return __lookup_next_mnt_ns(mntns, true); -} +struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mnt_ns, + bool previous); + static inline struct mnt_namespace *to_mnt_ns(struct ns_common *ns) { return container_of(ns, struct mnt_namespace, ns); diff --git a/fs/namei.c b/fs/namei.c index 9d30c7aa9aa6..3ab9440c5b93 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -921,10 +921,11 @@ out_dput: return false; } -static inline int d_revalidate(struct dentry *dentry, unsigned int flags) +static inline int d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - return dentry->d_op->d_revalidate(dentry, flags); + return dentry->d_op->d_revalidate(dir, name, dentry, flags); else return 1; } @@ -1099,7 +1100,7 @@ static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; #ifdef CONFIG_SYSCTL -static struct ctl_table namei_sysctls[] = { +static const struct ctl_table namei_sysctls[] = { { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, @@ -1652,7 +1653,7 @@ static struct dentry *lookup_dcache(const struct qstr *name, { struct dentry *dentry = d_lookup(dir, name); if (dentry) { - int error = d_revalidate(dentry, flags); + int error = d_revalidate(dir->d_inode, name, dentry, flags); if (unlikely(error <= 0)) { if (!error) d_invalidate(dentry); @@ -1737,19 +1738,20 @@ static struct dentry *lookup_fast(struct nameidata *nd) if (read_seqcount_retry(&parent->d_seq, nd->seq)) return ERR_PTR(-ECHILD); - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags); if (likely(status > 0)) return dentry; if (!try_to_unlazy_next(nd, dentry)) return ERR_PTR(-ECHILD); if (status == -ECHILD) /* we'd been told to redo it in non-rcu mode */ - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(nd->inode, &nd->last, + dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return NULL; - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(nd->inode, &nd->last, dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) @@ -1777,7 +1779,7 @@ again: if (IS_ERR(dentry)) return dentry; if (unlikely(!d_in_lookup(dentry))) { - int error = d_revalidate(dentry, flags); + int error = d_revalidate(inode, name, dentry, flags); if (unlikely(error <= 0)) { if (!error) { d_invalidate(dentry); @@ -3575,7 +3577,7 @@ static struct dentry *lookup_open(struct nameidata *nd, struct file *file, if (d_in_lookup(dentry)) break; - error = d_revalidate(dentry, nd->flags); + error = d_revalidate(dir_inode, &nd->last, dentry, nd->flags); if (likely(error > 0)) break; if (error) @@ -5272,19 +5274,16 @@ SYSCALL_DEFINE2(rename, const char __user *, 
oldname, const char __user *, newna getname(newname), 0); } -int readlink_copy(char __user *buffer, int buflen, const char *link) +int readlink_copy(char __user *buffer, int buflen, const char *link, int linklen) { - int len = PTR_ERR(link); - if (IS_ERR(link)) - goto out; + int copylen; - len = strlen(link); - if (len > (unsigned) buflen) - len = buflen; - if (copy_to_user(buffer, link, len)) - len = -EFAULT; -out: - return len; + copylen = linklen; + if (unlikely(copylen > (unsigned) buflen)) + copylen = buflen; + if (copy_to_user(buffer, link, copylen)) + copylen = -EFAULT; + return copylen; } /** @@ -5304,6 +5303,9 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) const char *link; int res; + if (inode->i_opflags & IOP_CACHED_LINK) + return readlink_copy(buffer, buflen, inode->i_link, inode->i_linklen); + if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) { if (unlikely(inode->i_op->readlink)) return inode->i_op->readlink(dentry, buffer, buflen); @@ -5322,7 +5324,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) if (IS_ERR(link)) return PTR_ERR(link); } - res = readlink_copy(buffer, buflen, link); + res = readlink_copy(buffer, buflen, link, strlen(link)); do_delayed_call(&done); return res; } @@ -5391,10 +5393,14 @@ EXPORT_SYMBOL(page_put_link); int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { + const char *link; + int res; + DEFINE_DELAYED_CALL(done); - int res = readlink_copy(buffer, buflen, - page_get_link(dentry, d_inode(dentry), - &done)); + link = page_get_link(dentry, d_inode(dentry), &done); + res = PTR_ERR(link); + if (!IS_ERR(link)) + res = readlink_copy(buffer, buflen, link, strlen(link)); do_delayed_call(&done); return res; } diff --git a/fs/namespace.c b/fs/namespace.c index eac057e56948..8f1000f9f3df 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -32,7 +32,7 @@ #include <linux/fs_context.h> #include <linux/shmem_fs.h> #include <linux/mnt_idmapping.h> -#include <linux/nospec.h> +#include <linux/pidfs.h> #include "pnode.h" #include "internal.h" @@ -66,12 +66,12 @@ static int __init set_mphash_entries(char *str) __setup("mphash_entries=", set_mphash_entries); static u64 event; -static DEFINE_IDA(mnt_id_ida); +static DEFINE_XARRAY_FLAGS(mnt_id_xa, XA_FLAGS_ALLOC); static DEFINE_IDA(mnt_group_ida); /* Don't allow confusion with old 32bit mount ID */ #define MNT_UNIQUE_ID_OFFSET (1ULL << 31) -static atomic64_t mnt_id_ctr = ATOMIC64_INIT(MNT_UNIQUE_ID_OFFSET); +static u64 mnt_id_ctr = MNT_UNIQUE_ID_OFFSET; static struct hlist_head *mount_hashtable __ro_after_init; static struct hlist_head *mountpoint_hashtable __ro_after_init; @@ -79,8 +79,10 @@ static struct kmem_cache *mnt_cache __ro_after_init; static DECLARE_RWSEM(namespace_sem); static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static LIST_HEAD(ex_mountpoints); /* protected by namespace_sem */ -static DEFINE_RWLOCK(mnt_ns_tree_lock); +static DEFINE_SEQLOCK(mnt_ns_tree_lock); + static struct rb_root mnt_ns_tree = RB_ROOT; /* protected by mnt_ns_tree_lock */ +static LIST_HEAD(mnt_ns_list); /* protected by mnt_ns_tree_lock */ struct mount_kattr { unsigned int attr_set; @@ -106,17 +108,6 @@ EXPORT_SYMBOL_GPL(fs_kobj); */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); -static int mnt_ns_cmp(u64 seq, const struct mnt_namespace *ns) -{ - u64 seq_b = ns->seq; - - if (seq < seq_b) - return -1; - if (seq > seq_b) - return 1; - return 0; -} - static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node) { 
if (!node) @@ -124,25 +115,52 @@ static inline struct mnt_namespace *node_to_mnt_ns(const struct rb_node *node) return rb_entry(node, struct mnt_namespace, mnt_ns_tree_node); } -static bool mnt_ns_less(struct rb_node *a, const struct rb_node *b) +static int mnt_ns_cmp(struct rb_node *a, const struct rb_node *b) { struct mnt_namespace *ns_a = node_to_mnt_ns(a); struct mnt_namespace *ns_b = node_to_mnt_ns(b); u64 seq_a = ns_a->seq; + u64 seq_b = ns_b->seq; + + if (seq_a < seq_b) + return -1; + if (seq_a > seq_b) + return 1; + return 0; +} + +static inline void mnt_ns_tree_write_lock(void) +{ + write_seqlock(&mnt_ns_tree_lock); +} - return mnt_ns_cmp(seq_a, ns_b) < 0; +static inline void mnt_ns_tree_write_unlock(void) +{ + write_sequnlock(&mnt_ns_tree_lock); } static void mnt_ns_tree_add(struct mnt_namespace *ns) { - guard(write_lock)(&mnt_ns_tree_lock); - rb_add(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_less); + struct rb_node *node, *prev; + + mnt_ns_tree_write_lock(); + node = rb_find_add_rcu(&ns->mnt_ns_tree_node, &mnt_ns_tree, mnt_ns_cmp); + /* + * If there's no previous entry simply add it after the + * head and if there is add it after the previous entry. + */ + prev = rb_prev(&ns->mnt_ns_tree_node); + if (!prev) + list_add_rcu(&ns->mnt_ns_list, &mnt_ns_list); + else + list_add_rcu(&ns->mnt_ns_list, &node_to_mnt_ns(prev)->mnt_ns_list); + mnt_ns_tree_write_unlock(); + + WARN_ON_ONCE(node); } static void mnt_ns_release(struct mnt_namespace *ns) { - lockdep_assert_not_held(&mnt_ns_tree_lock); - /* keep alive for {list,stat}mount() */ if (refcount_dec_and_test(&ns->passive)) { put_user_ns(ns->user_ns); @@ -151,41 +169,34 @@ static void mnt_ns_release(struct mnt_namespace *ns) } DEFINE_FREE(mnt_ns_release, struct mnt_namespace *, if (_T) mnt_ns_release(_T)) +static void mnt_ns_release_rcu(struct rcu_head *rcu) +{ + mnt_ns_release(container_of(rcu, struct mnt_namespace, mnt_ns_rcu)); +} + static void mnt_ns_tree_remove(struct mnt_namespace *ns) { /* remove from global mount namespace list */ if (!is_anon_ns(ns)) { - guard(write_lock)(&mnt_ns_tree_lock); + mnt_ns_tree_write_lock(); rb_erase(&ns->mnt_ns_tree_node, &mnt_ns_tree); + list_bidir_del_rcu(&ns->mnt_ns_list); + mnt_ns_tree_write_unlock(); } - mnt_ns_release(ns); + call_rcu(&ns->mnt_ns_rcu, mnt_ns_release_rcu); } -/* - * Returns the mount namespace which either has the specified id, or has the - * next smallest id afer the specified one. - */ -static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id) +static int mnt_ns_find(const void *key, const struct rb_node *node) { - struct rb_node *node = mnt_ns_tree.rb_node; - struct mnt_namespace *ret = NULL; + const u64 mnt_ns_id = *(u64 *)key; + const struct mnt_namespace *ns = node_to_mnt_ns(node); - lockdep_assert_held(&mnt_ns_tree_lock); - - while (node) { - struct mnt_namespace *n = node_to_mnt_ns(node); - - if (mnt_ns_id <= n->seq) { - ret = node_to_mnt_ns(node); - if (mnt_ns_id == n->seq) - break; - node = node->rb_left; - } else { - node = node->rb_right; - } - } - return ret; + if (mnt_ns_id < ns->seq) + return -1; + if (mnt_ns_id > ns->seq) + return 1; + return 0; } /* @@ -195,18 +206,37 @@ static struct mnt_namespace *mnt_ns_find_id_at(u64 mnt_ns_id) * namespace the @namespace_sem must first be acquired. If the namespace has * already shut down before acquiring @namespace_sem, {list,stat}mount() will * see that the mount rbtree of the namespace is empty. + * + * Note the lookup is lockless protected by a sequence counter. 
We only + * need to guard against false negatives as false positives aren't + * possible. So if we didn't find a mount namespace and the sequence + * counter has changed we need to retry. If the sequence counter is + * still the same we know the search actually failed. */ static struct mnt_namespace *lookup_mnt_ns(u64 mnt_ns_id) { - struct mnt_namespace *ns; + struct mnt_namespace *ns; + struct rb_node *node; + unsigned int seq; + + guard(rcu)(); + do { + seq = read_seqbegin(&mnt_ns_tree_lock); + node = rb_find_rcu(&mnt_ns_id, &mnt_ns_tree, mnt_ns_find); + if (node) + break; + } while (read_seqretry(&mnt_ns_tree_lock, seq)); - guard(read_lock)(&mnt_ns_tree_lock); - ns = mnt_ns_find_id_at(mnt_ns_id); - if (!ns || ns->seq != mnt_ns_id) - return NULL; + if (!node) + return NULL; - refcount_inc(&ns->passive); - return ns; + /* + * The last reference count is put with RCU delay so we can + * unconditonally acquire a reference here. + */ + ns = node_to_mnt_ns(node); + refcount_inc(&ns->passive); + return ns; } static inline void lock_mount_hash(void) @@ -236,18 +266,19 @@ static inline struct hlist_head *mp_hash(struct dentry *dentry) static int mnt_alloc_id(struct mount *mnt) { - int res = ida_alloc(&mnt_id_ida, GFP_KERNEL); + int res; - if (res < 0) - return res; - mnt->mnt_id = res; - mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr); - return 0; + xa_lock(&mnt_id_xa); + res = __xa_alloc(&mnt_id_xa, &mnt->mnt_id, mnt, XA_LIMIT(1, INT_MAX), GFP_KERNEL); + if (!res) + mnt->mnt_id_unique = ++mnt_id_ctr; + xa_unlock(&mnt_id_xa); + return res; } static void mnt_free_id(struct mount *mnt) { - ida_free(&mnt_id_ida, mnt->mnt_id); + xa_erase(&mnt_id_xa, mnt->mnt_id); } /* @@ -1124,16 +1155,25 @@ static void mnt_add_to_ns(struct mnt_namespace *ns, struct mount *mnt) { struct rb_node **link = &ns->mounts.rb_node; struct rb_node *parent = NULL; + bool mnt_first_node = true, mnt_last_node = true; WARN_ON(mnt_ns_attached(mnt)); mnt->mnt_ns = ns; while (*link) { parent = *link; - if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) + if (mnt->mnt_id_unique < node_to_mount(parent)->mnt_id_unique) { link = &parent->rb_left; - else + mnt_last_node = false; + } else { link = &parent->rb_right; + mnt_first_node = false; + } } + + if (mnt_last_node) + ns->mnt_last_node = &mnt->mnt_node; + if (mnt_first_node) + ns->mnt_first_node = &mnt->mnt_node; rb_link_node(&mnt->mnt_node, parent, link); rb_insert_color(&mnt->mnt_node, &ns->mounts); } @@ -2069,30 +2109,34 @@ struct ns_common *from_mnt_ns(struct mnt_namespace *mnt) return &mnt->ns; } -struct mnt_namespace *__lookup_next_mnt_ns(struct mnt_namespace *mntns, bool previous) +struct mnt_namespace *get_sequential_mnt_ns(struct mnt_namespace *mntns, bool previous) { - guard(read_lock)(&mnt_ns_tree_lock); + guard(rcu)(); + for (;;) { - struct rb_node *node; + struct list_head *list; if (previous) - node = rb_prev(&mntns->mnt_ns_tree_node); + list = rcu_dereference(list_bidir_prev_rcu(&mntns->mnt_ns_list)); else - node = rb_next(&mntns->mnt_ns_tree_node); - if (!node) + list = rcu_dereference(list_next_rcu(&mntns->mnt_ns_list)); + if (list_is_head(list, &mnt_ns_list)) return ERR_PTR(-ENOENT); - mntns = node_to_mnt_ns(node); - node = &mntns->mnt_ns_tree_node; + mntns = list_entry_rcu(list, struct mnt_namespace, mnt_ns_list); + /* + * The last passive reference count is put with RCU + * delay so accessing the mount namespace is not just + * safe but all relevant members are still valid. 
+ */ if (!ns_capable_noaudit(mntns->user_ns, CAP_SYS_ADMIN)) continue; /* - * Holding mnt_ns_tree_lock prevents the mount namespace from - * being freed but it may well be on it's deathbed. We want an - * active reference, not just a passive one here as we're - * persisting the mount namespace. + * We need an active reference count as we're persisting + * the mount namespace and it might already be on its + * deathbed. */ if (!refcount_inc_not_zero(&mntns->ns.count)) continue; @@ -2736,8 +2780,13 @@ static struct mount *__do_loopback(struct path *old_path, int recurse) if (IS_MNT_UNBINDABLE(old)) return mnt; - if (!check_mnt(old) && old_path->dentry->d_op != &ns_dentry_operations) - return mnt; + if (!check_mnt(old)) { + const struct dentry_operations *d_op = old_path->dentry->d_op; + + if (d_op != &ns_dentry_operations && + d_op != &pidfs_dentry_operations) + return mnt; + } if (!recurse && has_locked_children(old, old_path->dentry)) return mnt; @@ -3839,7 +3888,7 @@ int path_mount(const char *dev_name, struct path *path, data_page); } -long do_mount(const char *dev_name, const char __user *dir_name, +int do_mount(const char *dev_name, const char __user *dir_name, const char *type_page, unsigned long flags, void *data_page) { struct path path; @@ -3909,6 +3958,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns, bool a refcount_set(&new_ns->ns.count, 1); refcount_set(&new_ns->passive, 1); new_ns->mounts = RB_ROOT; + INIT_LIST_HEAD(&new_ns->mnt_ns_list); RB_CLEAR_NODE(&new_ns->mnt_ns_tree_node); init_waitqueue_head(&new_ns->poll); new_ns->user_ns = get_user_ns(user_ns); @@ -3988,7 +4038,6 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, while (p->mnt.mnt_root != q->mnt.mnt_root) p = next_mnt(skip_mnt_tree(p), old); } - mnt_ns_tree_add(new_ns); namespace_unlock(); if (rootmnt) @@ -3996,6 +4045,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, if (pwdmnt) mntput(pwdmnt); + mnt_ns_tree_add(new_ns); return new_ns; } @@ -5037,26 +5087,29 @@ static int statmount_mnt_opts(struct kstatmount *s, struct seq_file *seq) { struct vfsmount *mnt = s->mnt; struct super_block *sb = mnt->mnt_sb; + size_t start = seq->count; int err; - if (sb->s_op->show_options) { - size_t start = seq->count; + err = security_sb_show_options(seq, sb); + if (err) + return err; + if (sb->s_op->show_options) { err = sb->s_op->show_options(seq, mnt->mnt_root); if (err) return err; + } - if (unlikely(seq_has_overflowed(seq))) - return -EAGAIN; + if (unlikely(seq_has_overflowed(seq))) + return -EAGAIN; - if (seq->count == start) - return 0; + if (seq->count == start) + return 0; - /* skip leading comma */ - memmove(seq->buf + start, seq->buf + start + 1, - seq->count - start - 1); - seq->count--; - } + /* skip leading comma */ + memmove(seq->buf + start, seq->buf + start + 1, + seq->count - start - 1); + seq->count--; return 0; } @@ -5137,39 +5190,45 @@ static int statmount_string(struct kstatmount *s, u64 flag) size_t kbufsize; struct seq_file *seq = &s->seq; struct statmount *sm = &s->sm; - u32 start = seq->count; + u32 start, *offp; + + /* Reserve an empty string at the beginning for any unset offsets */ + if (!seq->count) + seq_putc(seq, 0); + + start = seq->count; switch (flag) { case STATMOUNT_FS_TYPE: - sm->fs_type = start; + offp = &sm->fs_type; ret = statmount_fs_type(s, seq); break; case STATMOUNT_MNT_ROOT: - sm->mnt_root = start; + offp = &sm->mnt_root; ret = statmount_mnt_root(s, seq); break; case STATMOUNT_MNT_POINT: - 
sm->mnt_point = start; + offp = &sm->mnt_point; ret = statmount_mnt_point(s, seq); break; case STATMOUNT_MNT_OPTS: - sm->mnt_opts = start; + offp = &sm->mnt_opts; ret = statmount_mnt_opts(s, seq); break; case STATMOUNT_OPT_ARRAY: - sm->opt_array = start; + offp = &sm->opt_array; ret = statmount_opt_array(s, seq); break; case STATMOUNT_OPT_SEC_ARRAY: - sm->opt_sec_array = start; + offp = &sm->opt_sec_array; ret = statmount_opt_sec_array(s, seq); break; case STATMOUNT_FS_SUBTYPE: - sm->fs_subtype = start; + offp = &sm->fs_subtype; statmount_fs_subtype(s, seq); break; case STATMOUNT_SB_SOURCE: - sm->sb_source = start; + offp = &sm->sb_source; ret = statmount_sb_source(s, seq); break; default: @@ -5197,6 +5256,7 @@ static int statmount_string(struct kstatmount *s, u64 flag) seq->buf[seq->count++] = '\0'; sm->mask |= flag; + *offp = start; return 0; } @@ -5529,9 +5589,9 @@ static ssize_t do_listmount(struct mnt_namespace *ns, u64 mnt_parent_id, if (!last_mnt_id) { if (reverse) - first = node_to_mount(rb_last(&ns->mounts)); + first = node_to_mount(ns->mnt_last_node); else - first = node_to_mount(rb_first(&ns->mounts)); + first = node_to_mount(ns->mnt_first_node); } else { if (reverse) first = mnt_find_id_at_reverse(ns, last_mnt_id - 1); @@ -5931,7 +5991,7 @@ const struct proc_ns_operations mntns_operations = { }; #ifdef CONFIG_SYSCTL -static struct ctl_table fs_namespace_sysctls[] = { +static const struct ctl_table fs_namespace_sysctls[] = { { .procname = "mount-max", .data = &sysctl_mount_max, diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile index d08b0bfb6756..b43188d64bd8 100644 --- a/fs/netfs/Makefile +++ b/fs/netfs/Makefile @@ -13,8 +13,11 @@ netfs-y := \ read_collect.o \ read_pgpriv2.o \ read_retry.o \ + read_single.o \ + rolling_buffer.o \ write_collect.o \ - write_issue.o + write_issue.o \ + write_retry.o netfs-$(CONFIG_NETFS_STATS) += stats.o diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 4dc9b8286355..f761d44b3436 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -64,37 +64,6 @@ static int netfs_begin_cache_read(struct netfs_io_request *rreq, struct netfs_in } /* - * Decant the list of folios to read into a rolling buffer. 
- */ -static size_t netfs_load_buffer_from_ra(struct netfs_io_request *rreq, - struct folio_queue *folioq, - struct folio_batch *put_batch) -{ - unsigned int order, nr; - size_t size = 0; - - nr = __readahead_batch(rreq->ractl, (struct page **)folioq->vec.folios, - ARRAY_SIZE(folioq->vec.folios)); - folioq->vec.nr = nr; - for (int i = 0; i < nr; i++) { - struct folio *folio = folioq_folio(folioq, i); - - trace_netfs_folio(folio, netfs_folio_trace_read); - order = folio_order(folio); - folioq->orders[i] = order; - size += PAGE_SIZE << order; - - if (!folio_batch_add(put_batch, folio)) - folio_batch_release(put_batch); - } - - for (int i = nr; i < folioq_nr_slots(folioq); i++) - folioq_clear(folioq, i); - - return size; -} - -/* * netfs_prepare_read_iterator - Prepare the subreq iterator for I/O * @subreq: The subrequest to be set up * @@ -128,19 +97,12 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq) folio_batch_init(&put_batch); while (rreq->submitted < subreq->start + rsize) { - struct folio_queue *tail = rreq->buffer_tail, *new; - size_t added; - - new = kmalloc(sizeof(*new), GFP_NOFS); - if (!new) - return -ENOMEM; - netfs_stat(&netfs_n_folioq); - folioq_init(new); - new->prev = tail; - tail->next = new; - rreq->buffer_tail = new; - added = netfs_load_buffer_from_ra(rreq, new, &put_batch); - rreq->iter.count += added; + ssize_t added; + + added = rolling_buffer_load_from_ra(&rreq->buffer, rreq->ractl, + &put_batch); + if (added < 0) + return added; rreq->submitted += added; } folio_batch_release(&put_batch); @@ -148,7 +110,7 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq) subreq->len = rsize; if (unlikely(rreq->io_streams[0].sreq_max_segs)) { - size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize, + size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize, rreq->io_streams[0].sreq_max_segs); if (limit < rsize) { @@ -157,20 +119,10 @@ static ssize_t netfs_prepare_read_iterator(struct netfs_io_subrequest *subreq) } } - subreq->io_iter = rreq->iter; - - if (iov_iter_is_folioq(&subreq->io_iter)) { - if (subreq->io_iter.folioq_slot >= folioq_nr_slots(subreq->io_iter.folioq)) { - subreq->io_iter.folioq = subreq->io_iter.folioq->next; - subreq->io_iter.folioq_slot = 0; - } - subreq->curr_folioq = (struct folio_queue *)subreq->io_iter.folioq; - subreq->curr_folioq_slot = subreq->io_iter.folioq_slot; - subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot]; - } + subreq->io_iter = rreq->buffer.iter; iov_iter_truncate(&subreq->io_iter, subreq->len); - iov_iter_advance(&rreq->iter, subreq->len); + rolling_buffer_advance(&rreq->buffer, subreq->len); return subreq->len; } @@ -179,25 +131,14 @@ static enum netfs_io_source netfs_cache_prepare_read(struct netfs_io_request *rr loff_t i_size) { struct netfs_cache_resources *cres = &rreq->cache_resources; + enum netfs_io_source source; if (!cres->ops) return NETFS_DOWNLOAD_FROM_SERVER; - return cres->ops->prepare_read(subreq, i_size); -} - -static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, - bool was_async) -{ - struct netfs_io_subrequest *subreq = priv; - - if (transferred_or_error < 0) { - netfs_read_subreq_terminated(subreq, transferred_or_error, was_async); - return; - } + source = cres->ops->prepare_read(subreq, i_size); + trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); + return source; - if (transferred_or_error > 0) - subreq->transferred += transferred_or_error; - netfs_read_subreq_terminated(subreq, 0, was_async); } 
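[Illustrative sketch, not part of the patch.] As the removed hunk above and the added netfs_issue_read() below show, a read subrequest now records its completion status in subreq->error and is finished with the single-argument netfs_read_subreq_terminated(); the old three-argument form with the was_async flag goes away. A hypothetical filesystem completion callback (examplefs_read_done is an invented name) would follow roughly this shape:

static void examplefs_read_done(struct netfs_io_subrequest *subreq, ssize_t ret)
{
	if (ret > 0)
		subreq->transferred += ret;	/* count the bytes actually read */
	subreq->error = ret < 0 ? ret : 0;	/* status now lives in the subrequest */
	netfs_read_subreq_terminated(subreq);	/* single-argument form after this series */
}
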
/* @@ -214,6 +155,47 @@ static void netfs_read_cache_to_pagecache(struct netfs_io_request *rreq, netfs_cache_read_terminated, subreq); } +static void netfs_issue_read(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) +{ + struct netfs_io_stream *stream = &rreq->io_streams[0]; + + __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + + /* We add to the end of the list whilst the collector may be walking + * the list. The collector only goes nextwards and uses the lock to + * remove entries off of the front. + */ + spin_lock(&rreq->lock); + list_add_tail(&subreq->rreq_link, &stream->subrequests); + if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { + stream->front = subreq; + if (!stream->active) { + stream->collected_to = stream->front->start; + /* Store list pointers before active flag */ + smp_store_release(&stream->active, true); + } + } + + spin_unlock(&rreq->lock); + + switch (subreq->source) { + case NETFS_DOWNLOAD_FROM_SERVER: + rreq->netfs_ops->issue_read(subreq); + break; + case NETFS_READ_FROM_CACHE: + netfs_read_cache_to_pagecache(rreq, subreq); + break; + default: + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + subreq->error = 0; + iov_iter_zero(subreq->len, &subreq->io_iter); + subreq->transferred = subreq->len; + netfs_read_subreq_terminated(subreq); + break; + } +} + /* * Perform a read to the pagecache from a series of sources of different types, * slicing up the region to be read according to available cache blocks and @@ -226,11 +208,9 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq) ssize_t size = rreq->len; int ret = 0; - atomic_inc(&rreq->nr_outstanding); - do { struct netfs_io_subrequest *subreq; - enum netfs_io_source source = NETFS_DOWNLOAD_FROM_SERVER; + enum netfs_io_source source = NETFS_SOURCE_UNKNOWN; ssize_t slice; subreq = netfs_alloc_subrequest(rreq); @@ -242,20 +222,14 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq) subreq->start = start; subreq->len = size; - atomic_inc(&rreq->nr_outstanding); - spin_lock_bh(&rreq->lock); - list_add_tail(&subreq->rreq_link, &rreq->subrequests); - subreq->prev_donated = rreq->prev_donated; - rreq->prev_donated = 0; - trace_netfs_sreq(subreq, netfs_sreq_trace_added); - spin_unlock_bh(&rreq->lock); - source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size); subreq->source = source; if (source == NETFS_DOWNLOAD_FROM_SERVER) { unsigned long long zp = umin(ictx->zero_point, rreq->i_size); size_t len = subreq->len; + if (unlikely(rreq->origin == NETFS_READ_SINGLE)) + zp = rreq->i_size; if (subreq->start >= zp) { subreq->source = source = NETFS_FILL_WITH_ZEROES; goto fill_with_zeroes; @@ -275,17 +249,18 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq) netfs_stat(&netfs_n_rh_download); if (rreq->netfs_ops->prepare_read) { ret = rreq->netfs_ops->prepare_read(subreq); - if (ret < 0) - goto prep_failed; + if (ret < 0) { + subreq->error = ret; + /* Not queued - release both refs. 
*/ + netfs_put_subrequest(subreq, false, + netfs_sreq_trace_put_cancel); + netfs_put_subrequest(subreq, false, + netfs_sreq_trace_put_cancel); + break; + } trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); } - - slice = netfs_prepare_read_iterator(subreq); - if (slice < 0) - goto prep_iter_failed; - - rreq->netfs_ops->issue_read(subreq); - goto done; + goto issue; } fill_with_zeroes: @@ -293,94 +268,50 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq) subreq->source = NETFS_FILL_WITH_ZEROES; trace_netfs_sreq(subreq, netfs_sreq_trace_submit); netfs_stat(&netfs_n_rh_zero); - slice = netfs_prepare_read_iterator(subreq); - if (slice < 0) - goto prep_iter_failed; - __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); - netfs_read_subreq_terminated(subreq, 0, false); - goto done; + goto issue; } if (source == NETFS_READ_FROM_CACHE) { trace_netfs_sreq(subreq, netfs_sreq_trace_submit); - slice = netfs_prepare_read_iterator(subreq); - if (slice < 0) - goto prep_iter_failed; - netfs_read_cache_to_pagecache(rreq, subreq); - goto done; + goto issue; } pr_err("Unexpected read source %u\n", source); WARN_ON_ONCE(1); break; - prep_iter_failed: - ret = slice; - prep_failed: - subreq->error = ret; - atomic_dec(&rreq->nr_outstanding); - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); - break; - - done: + issue: + slice = netfs_prepare_read_iterator(subreq); + if (slice < 0) { + ret = slice; + subreq->error = ret; + trace_netfs_sreq(subreq, netfs_sreq_trace_cancel); + /* Not queued - release both refs. */ + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); + break; + } size -= slice; start += slice; + if (size <= 0) { + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); + } + + netfs_issue_read(rreq, subreq); cond_resched(); } while (size > 0); - if (atomic_dec_and_test(&rreq->nr_outstanding)) - netfs_rreq_terminated(rreq, false); + if (unlikely(size > 0)) { + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); + netfs_wake_read_collector(rreq); + } /* Defer error return as we may need to wait for outstanding I/O. */ cmpxchg(&rreq->error, 0, ret); } -/* - * Wait for the read operation to complete, successfully or otherwise. - */ -static int netfs_wait_for_read(struct netfs_io_request *rreq) -{ - int ret; - - trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip); - wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE); - ret = rreq->error; - if (ret == 0 && rreq->submitted < rreq->len) { - trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); - ret = -EIO; - } - - return ret; -} - -/* - * Set up the initial folioq of buffer folios in the rolling buffer and set the - * iterator to refer to it. 
- */ -static int netfs_prime_buffer(struct netfs_io_request *rreq) -{ - struct folio_queue *folioq; - struct folio_batch put_batch; - size_t added; - - folioq = kmalloc(sizeof(*folioq), GFP_KERNEL); - if (!folioq) - return -ENOMEM; - netfs_stat(&netfs_n_folioq); - folioq_init(folioq); - rreq->buffer = folioq; - rreq->buffer_tail = folioq; - rreq->submitted = rreq->start; - iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, 0); - - folio_batch_init(&put_batch); - added = netfs_load_buffer_from_ra(rreq, folioq, &put_batch); - folio_batch_release(&put_batch); - rreq->iter.count += added; - rreq->submitted += added; - return 0; -} - /** * netfs_readahead - Helper to manage a read request * @ractl: The description of the readahead request @@ -409,6 +340,8 @@ void netfs_readahead(struct readahead_control *ractl) if (IS_ERR(rreq)) return; + __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags); + ret = netfs_begin_cache_read(rreq, ictx); if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) goto cleanup_free; @@ -420,7 +353,8 @@ void netfs_readahead(struct readahead_control *ractl) netfs_rreq_expand(rreq, ractl); rreq->ractl = ractl; - if (netfs_prime_buffer(rreq) < 0) + rreq->submitted = rreq->start; + if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) goto cleanup_free; netfs_read_to_pagecache(rreq); @@ -436,23 +370,18 @@ EXPORT_SYMBOL(netfs_readahead); /* * Create a rolling buffer with a single occupying folio. */ -static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio) +static int netfs_create_singular_buffer(struct netfs_io_request *rreq, struct folio *folio, + unsigned int rollbuf_flags) { - struct folio_queue *folioq; + ssize_t added; - folioq = kmalloc(sizeof(*folioq), GFP_KERNEL); - if (!folioq) + if (rolling_buffer_init(&rreq->buffer, rreq->debug_id, ITER_DEST) < 0) return -ENOMEM; - netfs_stat(&netfs_n_folioq); - folioq_init(folioq); - folioq_append(folioq, folio); - BUG_ON(folioq_folio(folioq, 0) != folio); - BUG_ON(folioq_folio_order(folioq, 0) != folio_order(folio)); - rreq->buffer = folioq; - rreq->buffer_tail = folioq; - rreq->submitted = rreq->start + rreq->len; - iov_iter_folio_queue(&rreq->iter, ITER_DEST, folioq, 0, 0, rreq->len); + added = rolling_buffer_append(&rreq->buffer, folio, rollbuf_flags); + if (added < 0) + return added; + rreq->submitted = rreq->start + added; rreq->ractl = (struct readahead_control *)1UL; return 0; } @@ -520,7 +449,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio) } if (to < flen) bvec_set_folio(&bvec[i++], folio, flen - to, to); - iov_iter_bvec(&rreq->iter, ITER_DEST, bvec, i, rreq->len); + iov_iter_bvec(&rreq->buffer.iter, ITER_DEST, bvec, i, rreq->len); rreq->submitted = rreq->start + flen; netfs_read_to_pagecache(rreq); @@ -529,7 +458,7 @@ static int netfs_read_gaps(struct file *file, struct folio *folio) folio_put(sink); ret = netfs_wait_for_read(rreq); - if (ret == 0) { + if (ret >= 0) { flush_dcache_folio(folio); folio_mark_uptodate(folio); } @@ -588,7 +517,7 @@ int netfs_read_folio(struct file *file, struct folio *folio) trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage); /* Set up the output buffer */ - ret = netfs_create_singular_buffer(rreq, folio); + ret = netfs_create_singular_buffer(rreq, folio, 0); if (ret < 0) goto discard; @@ -745,7 +674,7 @@ retry: trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin); /* Set up the output buffer */ - ret = netfs_create_singular_buffer(rreq, folio); + ret = 
netfs_create_singular_buffer(rreq, folio, 0); if (ret < 0) goto error_put; @@ -810,15 +739,14 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio, trace_netfs_read(rreq, start, flen, netfs_read_trace_prefetch_for_write); /* Set up the output buffer */ - ret = netfs_create_singular_buffer(rreq, folio); + ret = netfs_create_singular_buffer(rreq, folio, NETFS_ROLLBUF_PAGECACHE_MARK); if (ret < 0) goto error_put; - folioq_mark2(rreq->buffer, 0); netfs_read_to_pagecache(rreq); ret = netfs_wait_for_read(rreq); netfs_put_request(rreq, false, netfs_rreq_trace_put_return); - return ret; + return ret < 0 ? ret : 0; error_put: netfs_put_request(rreq, false, netfs_rreq_trace_put_discard); diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c index b1a66a6e6bc2..0bf3c2f5a710 100644 --- a/fs/netfs/direct_read.c +++ b/fs/netfs/direct_read.c @@ -25,7 +25,7 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq) subreq->len = rsize; if (unlikely(rreq->io_streams[0].sreq_max_segs)) { - size_t limit = netfs_limit_iter(&rreq->iter, 0, rsize, + size_t limit = netfs_limit_iter(&rreq->buffer.iter, 0, rsize, rreq->io_streams[0].sreq_max_segs); if (limit < rsize) { @@ -36,9 +36,9 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq) trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); - subreq->io_iter = rreq->iter; + subreq->io_iter = rreq->buffer.iter; iov_iter_truncate(&subreq->io_iter, subreq->len); - iov_iter_advance(&rreq->iter, subreq->len); + iov_iter_advance(&rreq->buffer.iter, subreq->len); } /* @@ -47,12 +47,11 @@ static void netfs_prepare_dio_read_iterator(struct netfs_io_subrequest *subreq) */ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq) { + struct netfs_io_stream *stream = &rreq->io_streams[0]; unsigned long long start = rreq->start; ssize_t size = rreq->len; int ret = 0; - atomic_set(&rreq->nr_outstanding, 1); - do { struct netfs_io_subrequest *subreq; ssize_t slice; @@ -67,19 +66,25 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq) subreq->start = start; subreq->len = size; - atomic_inc(&rreq->nr_outstanding); - spin_lock_bh(&rreq->lock); - list_add_tail(&subreq->rreq_link, &rreq->subrequests); - subreq->prev_donated = rreq->prev_donated; - rreq->prev_donated = 0; + __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + + spin_lock(&rreq->lock); + list_add_tail(&subreq->rreq_link, &stream->subrequests); + if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { + stream->front = subreq; + if (!stream->active) { + stream->collected_to = stream->front->start; + /* Store list pointers before active flag */ + smp_store_release(&stream->active, true); + } + } trace_netfs_sreq(subreq, netfs_sreq_trace_added); - spin_unlock_bh(&rreq->lock); + spin_unlock(&rreq->lock); netfs_stat(&netfs_n_rh_download); if (rreq->netfs_ops->prepare_read) { ret = rreq->netfs_ops->prepare_read(subreq); if (ret < 0) { - atomic_dec(&rreq->nr_outstanding); netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); break; } @@ -87,20 +92,32 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq) netfs_prepare_dio_read_iterator(subreq); slice = subreq->len; - rreq->netfs_ops->issue_read(subreq); - size -= slice; start += slice; rreq->submitted += slice; + if (size <= 0) { + smp_wmb(); /* Write lists before ALL_QUEUED. 
*/ + set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); + } + + rreq->netfs_ops->issue_read(subreq); + if (test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) + netfs_wait_for_pause(rreq); + if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) + break; if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) && test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags)) break; cond_resched(); } while (size > 0); - if (atomic_dec_and_test(&rreq->nr_outstanding)) - netfs_rreq_terminated(rreq, false); + if (unlikely(size > 0)) { + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); + netfs_wake_read_collector(rreq); + } + return ret; } @@ -133,21 +150,10 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync) goto out; } - if (sync) { - trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip); - wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, - TASK_UNINTERRUPTIBLE); - - ret = rreq->error; - if (ret == 0 && rreq->submitted < rreq->len && - rreq->origin != NETFS_DIO_READ) { - trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); - ret = -EIO; - } - } else { + if (sync) + ret = netfs_wait_for_read(rreq); + else ret = -EIOCBQUEUED; - } - out: _leave(" = %d", ret); return ret; @@ -199,15 +205,15 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i * the request. */ if (user_backed_iter(iter)) { - ret = netfs_extract_user_iter(iter, rreq->len, &rreq->iter, 0); + ret = netfs_extract_user_iter(iter, rreq->len, &rreq->buffer.iter, 0); if (ret < 0) goto out; - rreq->direct_bv = (struct bio_vec *)rreq->iter.bvec; + rreq->direct_bv = (struct bio_vec *)rreq->buffer.iter.bvec; rreq->direct_bv_count = ret; rreq->direct_bv_unpin = iov_iter_extract_will_pin(iter); - rreq->len = iov_iter_count(&rreq->iter); + rreq->len = iov_iter_count(&rreq->buffer.iter); } else { - rreq->iter = *iter; + rreq->buffer.iter = *iter; rreq->len = orig_count; rreq->direct_bv_unpin = false; iov_iter_advance(iter, orig_count); @@ -215,8 +221,10 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i // TODO: Set up bounce buffer if needed - if (!sync) + if (!sync) { rreq->iocb = iocb; + __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags); + } ret = netfs_unbuffered_read(rreq, sync); if (ret < 0) diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c index f9421f3e6d37..42ce53cc216e 100644 --- a/fs/netfs/direct_write.c +++ b/fs/netfs/direct_write.c @@ -68,12 +68,12 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * * request. */ if (user_backed_iter(iter)) { - n = netfs_extract_user_iter(iter, len, &wreq->iter, 0); + n = netfs_extract_user_iter(iter, len, &wreq->buffer.iter, 0); if (n < 0) { ret = n; goto out; } - wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec; + wreq->direct_bv = (struct bio_vec *)wreq->buffer.iter.bvec; wreq->direct_bv_count = n; wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter); } else { @@ -82,10 +82,8 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * * (eg. a bio_vec array) will persist till the end of * the op. 
*/ - wreq->iter = *iter; + wreq->buffer.iter = *iter; } - - wreq->io_iter = wreq->iter; } __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags); @@ -97,7 +95,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter * __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); if (async) wreq->iocb = iocb; - wreq->len = iov_iter_count(&wreq->io_iter); + wreq->len = iov_iter_count(&wreq->buffer.iter); wreq->cleanup = netfs_cleanup_dio_write; ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len); if (ret < 0) { diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index c562aec3b483..eb76f98c894b 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -23,6 +23,7 @@ /* * buffered_read.c */ +void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async); int netfs_prefetch_for_write(struct file *file, struct folio *folio, size_t offset, size_t len); @@ -58,11 +59,8 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {} /* * misc.c */ -struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq); -int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio, - bool needs_put); -struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq); -void netfs_clear_buffer(struct netfs_io_request *rreq); +struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq, + enum netfs_folioq_trace trace); void netfs_reset_iter(struct netfs_io_subrequest *subreq); /* @@ -84,20 +82,27 @@ static inline void netfs_see_request(struct netfs_io_request *rreq, trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what); } +static inline void netfs_see_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what) +{ + trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, + refcount_read(&subreq->ref), what); +} + /* * read_collect.c */ -void netfs_read_termination_worker(struct work_struct *work); -void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async); +void netfs_read_collection_worker(struct work_struct *work); +void netfs_wake_read_collector(struct netfs_io_request *rreq); +void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async); +ssize_t netfs_wait_for_read(struct netfs_io_request *rreq); +void netfs_wait_for_pause(struct netfs_io_request *rreq); /* * read_pgpriv2.c */ -void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq, - struct netfs_io_request *rreq, - struct folio_queue *folioq, - int slot); -void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq); +void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio); +void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq); bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq); /* @@ -113,6 +118,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq); extern atomic_t netfs_n_rh_dio_read; extern atomic_t netfs_n_rh_readahead; extern atomic_t netfs_n_rh_read_folio; +extern atomic_t netfs_n_rh_read_single; extern atomic_t netfs_n_rh_rreq; extern atomic_t netfs_n_rh_sreq; extern atomic_t netfs_n_rh_download; @@ -181,9 +187,9 @@ void netfs_reissue_write(struct netfs_io_stream *stream, struct iov_iter *source); void netfs_issue_write(struct netfs_io_request *wreq, struct netfs_io_stream *stream); -int netfs_advance_write(struct netfs_io_request *wreq, - struct netfs_io_stream *stream, - loff_t start, size_t len, bool to_eof); 
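[Illustrative sketch, not part of the patch.] The internal.h hunk below changes netfs_advance_write() to return a size_t instead of an int. Assuming the return value is the byte count consumed from the [start, start + len) span (the real callers live in write_issue.c and also honour stream state), a caller loop would look roughly like this simplified fragment:

	size_t part;

	while (len > 0) {
		part = netfs_advance_write(wreq, stream, start, len, false);
		if (!part)		/* simplification: real callers have richer stop conditions */
			break;
		start += part;
		len -= part;
	}
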
+size_t netfs_advance_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream, + loff_t start, size_t len, bool to_eof); struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len); int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, struct folio *folio, size_t copied, bool to_page_end, @@ -193,6 +199,11 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len); /* + * write_retry.c + */ +void netfs_retry_writes(struct netfs_io_request *wreq); + +/* * Miscellaneous functions. */ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx) diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 6c7be1377ee0..4e3e62040831 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -37,9 +37,11 @@ static const char *netfs_origins[nr__netfs_io_origin] = { [NETFS_READAHEAD] = "RA", [NETFS_READPAGE] = "RP", [NETFS_READ_GAPS] = "RG", + [NETFS_READ_SINGLE] = "R1", [NETFS_READ_FOR_WRITE] = "RW", [NETFS_DIO_READ] = "DR", [NETFS_WRITEBACK] = "WB", + [NETFS_WRITEBACK_SINGLE] = "W1", [NETFS_WRITETHROUGH] = "WT", [NETFS_UNBUFFERED_WRITE] = "UW", [NETFS_DIO_WRITE] = "DW", @@ -69,7 +71,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v) refcount_read(&rreq->ref), rreq->flags, rreq->error, - atomic_read(&rreq->nr_outstanding), + 0, rreq->start, rreq->submitted, rreq->len); seq_putc(m, '\n'); return 0; @@ -116,7 +118,7 @@ static int __init netfs_init(void) goto error_reqpool; netfs_subrequest_slab = kmem_cache_create("netfs_subrequest", - sizeof(struct netfs_io_subrequest), 0, + sizeof(struct netfs_io_subrequest) + 16, 0, SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL); if (!netfs_subrequest_slab) diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c index 78fe5796b2b2..7099aa07737a 100644 --- a/fs/netfs/misc.c +++ b/fs/netfs/misc.c @@ -8,113 +8,101 @@ #include <linux/swap.h> #include "internal.h" -/* - * Make sure there's space in the rolling queue. +/** + * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue + * @mapping: Address space to set on the folio (or NULL). + * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated). + * @_cur_size: Current size of the buffer (updated). + * @size: Target size of the buffer. + * @gfp: The allocation constraints. */ -struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq) +int netfs_alloc_folioq_buffer(struct address_space *mapping, + struct folio_queue **_buffer, + size_t *_cur_size, ssize_t size, gfp_t gfp) { - struct folio_queue *tail = rreq->buffer_tail, *prev; - unsigned int prev_nr_slots = 0; - - if (WARN_ON_ONCE(!rreq->buffer && tail) || - WARN_ON_ONCE(rreq->buffer && !tail)) - return ERR_PTR(-EIO); - - prev = tail; - if (prev) { - if (!folioq_full(tail)) - return tail; - prev_nr_slots = folioq_nr_slots(tail); - } - - tail = kmalloc(sizeof(*tail), GFP_NOFS); - if (!tail) - return ERR_PTR(-ENOMEM); - netfs_stat(&netfs_n_folioq); - folioq_init(tail); - tail->prev = prev; - if (prev) - /* [!] NOTE: After we set prev->next, the consumer is entirely - * at liberty to delete prev. - */ - WRITE_ONCE(prev->next, tail); - - rreq->buffer_tail = tail; - if (!rreq->buffer) { - rreq->buffer = tail; - iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0); - } else { - /* Make sure we don't leave the master iterator pointing to a - * block that might get immediately consumed. 
- */ - if (rreq->io_iter.folioq == prev && - rreq->io_iter.folioq_slot == prev_nr_slots) { - rreq->io_iter.folioq = tail; - rreq->io_iter.folioq_slot = 0; + struct folio_queue *tail = *_buffer, *p; + + size = round_up(size, PAGE_SIZE); + if (*_cur_size >= size) + return 0; + + if (tail) + while (tail->next) + tail = tail->next; + + do { + struct folio *folio; + int order = 0, slot; + + if (!tail || folioq_full(tail)) { + p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer); + if (!p) + return -ENOMEM; + if (tail) { + tail->next = p; + p->prev = tail; + } else { + *_buffer = p; + } + tail = p; } - } - rreq->buffer_tail_slot = 0; - return tail; -} -/* - * Append a folio to the rolling queue. - */ -int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio, - bool needs_put) -{ - struct folio_queue *tail; - unsigned int slot, order = folio_order(folio); + if (size - *_cur_size > PAGE_SIZE) + order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT, + MAX_PAGECACHE_ORDER); - tail = netfs_buffer_make_space(rreq); - if (IS_ERR(tail)) - return PTR_ERR(tail); + folio = folio_alloc(gfp, order); + if (!folio && order > 0) + folio = folio_alloc(gfp, 0); + if (!folio) + return -ENOMEM; - rreq->io_iter.count += PAGE_SIZE << order; + folio->mapping = mapping; + folio->index = *_cur_size / PAGE_SIZE; + trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer); + slot = folioq_append_mark(tail, folio); + *_cur_size += folioq_folio_size(tail, slot); + } while (*_cur_size < size); - slot = folioq_append(tail, folio); - /* Store the counter after setting the slot. */ - smp_store_release(&rreq->buffer_tail_slot, slot); return 0; } +EXPORT_SYMBOL(netfs_alloc_folioq_buffer); -/* - * Delete the head of a rolling queue. +/** + * netfs_free_folioq_buffer - Free a folio queue. + * @fq: The start of the folio queue to free + * + * Free up a chain of folio_queues and, if marked, the marked folios they point + * to. */ -struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq) +void netfs_free_folioq_buffer(struct folio_queue *fq) { - struct folio_queue *head = wreq->buffer, *next = head->next; - - if (next) - next->prev = NULL; - netfs_stat_d(&netfs_n_folioq); - kfree(head); - wreq->buffer = next; - return next; -} + struct folio_queue *next; + struct folio_batch fbatch; -/* - * Clear out a rolling queue. 
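[Editor's illustration — not part of the patch] The two exported helpers above, netfs_alloc_folioq_buffer() and netfs_free_folioq_buffer(), replace the old per-request buffer routines. A minimal, hypothetical sketch of how a filesystem might use them to build and release a detached folio_queue buffer follows; the wrapper name, the NULL mapping and the GFP_KERNEL constraint are assumptions for the example, and the declarations are assumed to be reachable via <linux/netfs.h>.

#include <linux/netfs.h>

/* Hypothetical example: grow a private folio_queue buffer to at least @size
 * bytes, then free it again (folios appended by the allocator are marked, so
 * the free helper releases them too).
 */
static int example_grow_and_free_buffer(size_t size)
{
	struct folio_queue *fq = NULL;
	size_t cur_size = 0;
	int ret;

	ret = netfs_alloc_folioq_buffer(NULL, &fq, &cur_size, size, GFP_KERNEL);
	if (ret < 0)
		return ret;

	/* ... read into or fill the buffer via a folio_queue iterator ... */

	netfs_free_folioq_buffer(fq);
	return 0;
}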
- */ -void netfs_clear_buffer(struct netfs_io_request *rreq) -{ - struct folio_queue *p; + folio_batch_init(&fbatch); + + for (; fq; fq = next) { + for (int slot = 0; slot < folioq_count(fq); slot++) { + struct folio *folio = folioq_folio(fq, slot); - while ((p = rreq->buffer)) { - rreq->buffer = p->next; - for (int slot = 0; slot < folioq_count(p); slot++) { - struct folio *folio = folioq_folio(p, slot); - if (!folio) + if (!folio || + !folioq_is_marked(fq, slot)) continue; - if (folioq_is_marked(p, slot)) { - trace_netfs_folio(folio, netfs_folio_trace_put); - folio_put(folio); - } + + trace_netfs_folio(folio, netfs_folio_trace_put); + if (folio_batch_add(&fbatch, folio)) + folio_batch_release(&fbatch); } + netfs_stat_d(&netfs_n_folioq); - kfree(p); + next = fq->next; + kfree(fq); } + + folio_batch_release(&fbatch); } +EXPORT_SYMBOL(netfs_free_folioq_buffer); /* * Reset the subrequest iterator to refer just to the region remaining to be diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 31e388ec6e48..dc6b41ef18b0 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -48,17 +48,20 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, spin_lock_init(&rreq->lock); INIT_LIST_HEAD(&rreq->io_streams[0].subrequests); INIT_LIST_HEAD(&rreq->io_streams[1].subrequests); - INIT_LIST_HEAD(&rreq->subrequests); + init_waitqueue_head(&rreq->waitq); refcount_set(&rreq->ref, 1); if (origin == NETFS_READAHEAD || origin == NETFS_READPAGE || origin == NETFS_READ_GAPS || + origin == NETFS_READ_SINGLE || origin == NETFS_READ_FOR_WRITE || - origin == NETFS_DIO_READ) - INIT_WORK(&rreq->work, netfs_read_termination_worker); - else + origin == NETFS_DIO_READ) { + INIT_WORK(&rreq->work, netfs_read_collection_worker); + rreq->io_streams[0].avail = true; + } else { INIT_WORK(&rreq->work, netfs_write_collection_worker); + } __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); if (file && file->f_flags & O_NONBLOCK) @@ -92,14 +95,6 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) struct netfs_io_stream *stream; int s; - while (!list_empty(&rreq->subrequests)) { - subreq = list_first_entry(&rreq->subrequests, - struct netfs_io_subrequest, rreq_link); - list_del(&subreq->rreq_link); - netfs_put_subrequest(subreq, was_async, - netfs_sreq_trace_put_clear); - } - for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) { stream = &rreq->io_streams[s]; while (!list_empty(&stream->subrequests)) { @@ -143,7 +138,7 @@ static void netfs_free_request(struct work_struct *work) } kvfree(rreq->direct_bv); } - netfs_clear_buffer(rreq); + rolling_buffer_clear(&rreq->buffer); if (atomic_dec_and_test(&ictx->io_count)) wake_up_var(&ictx->io_count); diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c index e8624f5c7fcc..f65affa5a9e4 100644 --- a/fs/netfs/read_collect.c +++ b/fs/netfs/read_collect.c @@ -14,6 +14,14 @@ #include <linux/task_io_accounting_ops.h> #include "internal.h" +/* Notes made in the collector */ +#define HIT_PENDING 0x01 /* A front op was still pending */ +#define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */ +#define BUFFERED 0x08 /* The pagecache needs cleaning up */ +#define NEED_RETRY 0x10 /* A front op requests retrying */ +#define COPY_TO_CACHE 0x40 /* Need to copy subrequest to cache */ +#define ABANDON_SREQ 0x80 /* Need to abandon untransferred part of subrequest */ + /* * Clear the unread part of an I/O request. 
*/ @@ -31,14 +39,18 @@ static void netfs_clear_unread(struct netfs_io_subrequest *subreq) * cache the folio, we set the group to NETFS_FOLIO_COPY_TO_CACHE, mark it * dirty and let writeback handle it. */ -static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq, - struct netfs_io_request *rreq, +static void netfs_unlock_read_folio(struct netfs_io_request *rreq, struct folio_queue *folioq, int slot) { struct netfs_folio *finfo; struct folio *folio = folioq_folio(folioq, slot); + if (unlikely(folio_pos(folio) < rreq->abandon_to)) { + trace_netfs_folio(folio, netfs_folio_trace_abandon); + goto just_unlock; + } + flush_dcache_folio(folio); folio_mark_uptodate(folio); @@ -53,7 +65,7 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq, kfree(finfo); } - if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { + if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) { if (!WARN_ON_ONCE(folio_get_private(folio) != NULL)) { trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE); @@ -66,12 +78,11 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq, folioq_clear(folioq, slot); } else { // TODO: Use of PG_private_2 is deprecated. - if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) - netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot); - else - folioq_clear(folioq, slot); + if (test_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags)) + netfs_pgpriv2_copy_to_cache(rreq, folio); } +just_unlock: if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { if (folio->index == rreq->no_unlock_folio && test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags)) { @@ -81,238 +92,249 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq, folio_unlock(folio); } } + + folioq_clear(folioq, slot); } /* - * Unlock any folios that are now completely read. Returns true if the - * subrequest is removed from the list. + * Unlock any folios we've finished with. 
*/ -static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async) +static void netfs_read_unlock_folios(struct netfs_io_request *rreq, + unsigned int *notes) { - struct netfs_io_subrequest *prev, *next; - struct netfs_io_request *rreq = subreq->rreq; - struct folio_queue *folioq = subreq->curr_folioq; - size_t avail, prev_donated, next_donated, fsize, part, excess; - loff_t fpos, start; - loff_t fend; - int slot = subreq->curr_folioq_slot; - - if (WARN(subreq->transferred > subreq->len, - "Subreq overread: R%x[%x] %zu > %zu", - rreq->debug_id, subreq->debug_index, - subreq->transferred, subreq->len)) - subreq->transferred = subreq->len; - -next_folio: - fsize = PAGE_SIZE << subreq->curr_folio_order; - fpos = round_down(subreq->start + subreq->consumed, fsize); - fend = fpos + fsize; - - if (WARN_ON_ONCE(!folioq) || - WARN_ON_ONCE(!folioq_folio(folioq, slot)) || - WARN_ON_ONCE(folioq_folio(folioq, slot)->index != fpos / PAGE_SIZE)) { - pr_err("R=%08x[%x] s=%llx-%llx ctl=%zx/%zx/%zx sl=%u\n", - rreq->debug_id, subreq->debug_index, - subreq->start, subreq->start + subreq->transferred - 1, - subreq->consumed, subreq->transferred, subreq->len, - slot); - if (folioq) { - struct folio *folio = folioq_folio(folioq, slot); - - pr_err("folioq: orders=%02x%02x%02x%02x\n", - folioq->orders[0], folioq->orders[1], - folioq->orders[2], folioq->orders[3]); - if (folio) - pr_err("folio: %llx-%llx ix=%llx o=%u qo=%u\n", - fpos, fend - 1, folio_pos(folio), folio_order(folio), - folioq_folio_order(folioq, slot)); - } - } + struct folio_queue *folioq = rreq->buffer.tail; + unsigned long long collected_to = rreq->collected_to; + unsigned int slot = rreq->buffer.first_tail_slot; -donation_changed: - /* Try to consume the current folio if we've hit or passed the end of - * it. There's a possibility that this subreq doesn't start at the - * beginning of the folio, in which case we need to donate to/from the - * preceding subreq. - * - * We also need to include any potential donation back from the - * following subreq. - */ - prev_donated = READ_ONCE(subreq->prev_donated); - next_donated = READ_ONCE(subreq->next_donated); - if (prev_donated || next_donated) { - spin_lock_bh(&rreq->lock); - prev_donated = subreq->prev_donated; - next_donated = subreq->next_donated; - subreq->start -= prev_donated; - subreq->len += prev_donated; - subreq->transferred += prev_donated; - prev_donated = subreq->prev_donated = 0; - if (subreq->transferred == subreq->len) { - subreq->len += next_donated; - subreq->transferred += next_donated; - next_donated = subreq->next_donated = 0; + if (rreq->cleaned_to >= rreq->collected_to) + return; + + // TODO: Begin decryption + + if (slot >= folioq_nr_slots(folioq)) { + folioq = rolling_buffer_delete_spent(&rreq->buffer); + if (!folioq) { + rreq->front_folio_order = 0; + return; } - trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations); - spin_unlock_bh(&rreq->lock); + slot = 0; } - avail = subreq->transferred; - if (avail == subreq->len) - avail += next_donated; - start = subreq->start; - if (subreq->consumed == 0) { - start -= prev_donated; - avail += prev_donated; - } else { - start += subreq->consumed; - avail -= subreq->consumed; - } - part = umin(avail, fsize); - - trace_netfs_progress(subreq, start, avail, part); - - if (start + avail >= fend) { - if (fpos == start) { - /* Flush, unlock and mark for caching any folio we've just read. 
*/ - subreq->consumed = fend - subreq->start; - netfs_unlock_read_folio(subreq, rreq, folioq, slot); - folioq_mark2(folioq, slot); - if (subreq->consumed >= subreq->len) - goto remove_subreq; - } else if (fpos < start) { - excess = fend - subreq->start; - - spin_lock_bh(&rreq->lock); - /* If we complete first on a folio split with the - * preceding subreq, donate to that subreq - otherwise - * we get the responsibility. - */ - if (subreq->prev_donated != prev_donated) { - spin_unlock_bh(&rreq->lock); - goto donation_changed; - } + for (;;) { + struct folio *folio; + unsigned long long fpos, fend; + unsigned int order; + size_t fsize; - if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) { - spin_unlock_bh(&rreq->lock); - pr_err("Can't donate prior to front\n"); - goto bad; - } + if (*notes & COPY_TO_CACHE) + set_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags); - prev = list_prev_entry(subreq, rreq_link); - WRITE_ONCE(prev->next_donated, prev->next_donated + excess); - subreq->start += excess; - subreq->len -= excess; - subreq->transferred -= excess; - trace_netfs_donate(rreq, subreq, prev, excess, - netfs_trace_donate_tail_to_prev); - trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev); - - if (subreq->consumed >= subreq->len) - goto remove_subreq_locked; - spin_unlock_bh(&rreq->lock); - } else { - pr_err("fpos > start\n"); - goto bad; - } + folio = folioq_folio(folioq, slot); + if (WARN_ONCE(!folio_test_locked(folio), + "R=%08x: folio %lx is not locked\n", + rreq->debug_id, folio->index)) + trace_netfs_folio(folio, netfs_folio_trace_not_locked); + + order = folioq_folio_order(folioq, slot); + rreq->front_folio_order = order; + fsize = PAGE_SIZE << order; + fpos = folio_pos(folio); + fend = umin(fpos + fsize, rreq->i_size); + + trace_netfs_collect_folio(rreq, folio, fend, collected_to); - /* Advance the rolling buffer to the next folio. */ + /* Unlock any folio we've transferred all of. */ + if (collected_to < fend) + break; + + netfs_unlock_read_folio(rreq, folioq, slot); + WRITE_ONCE(rreq->cleaned_to, fpos + fsize); + *notes |= MADE_PROGRESS; + + clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags); + + /* Clean up the head folioq. If we clear an entire folioq, then + * we can get rid of it provided it's not also the tail folioq + * being filled by the issuer. + */ + folioq_clear(folioq, slot); slot++; if (slot >= folioq_nr_slots(folioq)) { + folioq = rolling_buffer_delete_spent(&rreq->buffer); + if (!folioq) + goto done; slot = 0; - folioq = folioq->next; - subreq->curr_folioq = folioq; + trace_netfs_folioq(folioq, netfs_trace_folioq_read_progress); } - subreq->curr_folioq_slot = slot; - if (folioq && folioq_folio(folioq, slot)) - subreq->curr_folio_order = folioq->orders[slot]; - if (!was_async) - cond_resched(); - goto next_folio; + + if (fpos + fsize >= collected_to) + break; } - /* Deal with partial progress. */ - if (subreq->transferred < subreq->len) - return false; + rreq->buffer.tail = folioq; +done: + rreq->buffer.first_tail_slot = slot; +} - /* Donate the remaining downloaded data to one of the neighbouring - * subrequests. Note that we may race with them doing the same thing. +/* + * Collect and assess the results of various read subrequests. We may need to + * retry some of the results. + * + * Note that we have a sequence of subrequests, which may be drawing on + * different sources and may or may not be the same size or starting position + * and may not even correspond in boundary alignment. 
+ */ +static void netfs_collect_read_results(struct netfs_io_request *rreq) +{ + struct netfs_io_subrequest *front, *remove; + struct netfs_io_stream *stream = &rreq->io_streams[0]; + unsigned int notes; + + _enter("%llx-%llx", rreq->start, rreq->start + rreq->len); + trace_netfs_rreq(rreq, netfs_rreq_trace_collect); + trace_netfs_collect(rreq); + +reassess: + if (rreq->origin == NETFS_READAHEAD || + rreq->origin == NETFS_READPAGE || + rreq->origin == NETFS_READ_FOR_WRITE) + notes = BUFFERED; + else + notes = 0; + + /* Remove completed subrequests from the front of the stream and + * advance the completion point. We stop when we hit something that's + * in progress. The issuer thread may be adding stuff to the tail + * whilst we're doing this. */ - spin_lock_bh(&rreq->lock); + front = READ_ONCE(stream->front); + while (front) { + size_t transferred; - if (subreq->prev_donated != prev_donated || - subreq->next_donated != next_donated) { - spin_unlock_bh(&rreq->lock); - cond_resched(); - goto donation_changed; - } + trace_netfs_collect_sreq(rreq, front); + _debug("sreq [%x] %llx %zx/%zx", + front->debug_index, front->start, front->transferred, front->len); - /* Deal with the trickiest case: that this subreq is in the middle of a - * folio, not touching either edge, but finishes first. In such a - * case, we donate to the previous subreq, if there is one and if it is - * contiguous, so that the donation is only handled when that completes - * - and remove this subreq from the list. - * - * If the previous subreq finished first, we will have acquired their - * donation and should be able to unlock folios and/or donate nextwards. - */ - if (!subreq->consumed && - !prev_donated && - !list_is_first(&subreq->rreq_link, &rreq->subrequests) && - subreq->start == prev->start + prev->len) { - prev = list_prev_entry(subreq, rreq_link); - WRITE_ONCE(prev->next_donated, prev->next_donated + subreq->len); - subreq->start += subreq->len; - subreq->len = 0; - subreq->transferred = 0; - trace_netfs_donate(rreq, subreq, prev, subreq->len, - netfs_trace_donate_to_prev); - trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_prev); - goto remove_subreq_locked; + if (stream->collected_to < front->start) { + trace_netfs_collect_gap(rreq, stream, front->start, 'F'); + stream->collected_to = front->start; + } + + if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) + notes |= HIT_PENDING; + smp_rmb(); /* Read counters after IN_PROGRESS flag. */ + transferred = READ_ONCE(front->transferred); + + /* If we can now collect the next folio, do so. We don't want + * to defer this as we have to decide whether we need to copy + * to the cache or not, and that may differ between adjacent + * subreqs. + */ + if (notes & BUFFERED) { + size_t fsize = PAGE_SIZE << rreq->front_folio_order; + + /* Clear the tail of a short read. 
*/ + if (!(notes & HIT_PENDING) && + front->error == 0 && + transferred < front->len && + (test_bit(NETFS_SREQ_HIT_EOF, &front->flags) || + test_bit(NETFS_SREQ_CLEAR_TAIL, &front->flags))) { + netfs_clear_unread(front); + transferred = front->transferred = front->len; + trace_netfs_sreq(front, netfs_sreq_trace_clear); + } + + stream->collected_to = front->start + transferred; + rreq->collected_to = stream->collected_to; + + if (test_bit(NETFS_SREQ_COPY_TO_CACHE, &front->flags)) + notes |= COPY_TO_CACHE; + + if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { + rreq->abandon_to = front->start + front->len; + front->transferred = front->len; + transferred = front->len; + trace_netfs_rreq(rreq, netfs_rreq_trace_set_abandon); + } + if (front->start + transferred >= rreq->cleaned_to + fsize || + test_bit(NETFS_SREQ_HIT_EOF, &front->flags)) + netfs_read_unlock_folios(rreq, ¬es); + } else { + stream->collected_to = front->start + transferred; + rreq->collected_to = stream->collected_to; + } + + /* Stall if the front is still undergoing I/O. */ + if (notes & HIT_PENDING) + break; + + if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { + if (!stream->failed) { + stream->error = front->error; + rreq->error = front->error; + set_bit(NETFS_RREQ_FAILED, &rreq->flags); + stream->failed = true; + } + notes |= MADE_PROGRESS | ABANDON_SREQ; + } else if (test_bit(NETFS_SREQ_NEED_RETRY, &front->flags)) { + stream->need_retry = true; + notes |= NEED_RETRY | MADE_PROGRESS; + break; + } else { + if (!stream->failed) + stream->transferred = stream->collected_to - rreq->start; + notes |= MADE_PROGRESS; + } + + /* Remove if completely consumed. */ + stream->source = front->source; + spin_lock(&rreq->lock); + + remove = front; + trace_netfs_sreq(front, netfs_sreq_trace_discard); + list_del_init(&front->rreq_link); + front = list_first_entry_or_null(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); + stream->front = front; + spin_unlock(&rreq->lock); + netfs_put_subrequest(remove, false, + notes & ABANDON_SREQ ? + netfs_sreq_trace_put_abandon : + netfs_sreq_trace_put_done); } - /* If we can't donate down the chain, donate up the chain instead. 
*/ - excess = subreq->len - subreq->consumed + next_donated; + trace_netfs_collect_stream(rreq, stream); + trace_netfs_collect_state(rreq, rreq->collected_to, notes); - if (!subreq->consumed) - excess += prev_donated; + if (!(notes & BUFFERED)) + rreq->cleaned_to = rreq->collected_to; - if (list_is_last(&subreq->rreq_link, &rreq->subrequests)) { - rreq->prev_donated = excess; - trace_netfs_donate(rreq, subreq, NULL, excess, - netfs_trace_donate_to_deferred_next); - } else { - next = list_next_entry(subreq, rreq_link); - WRITE_ONCE(next->prev_donated, excess); - trace_netfs_donate(rreq, subreq, next, excess, - netfs_trace_donate_to_next); + if (notes & NEED_RETRY) + goto need_retry; + if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) { + trace_netfs_rreq(rreq, netfs_rreq_trace_unpause); + clear_bit_unlock(NETFS_RREQ_PAUSE, &rreq->flags); + smp_mb__after_atomic(); /* Set PAUSE before task state */ + wake_up(&rreq->waitq); + } + + if (notes & MADE_PROGRESS) { + //cond_resched(); + goto reassess; } - trace_netfs_sreq(subreq, netfs_sreq_trace_donate_to_next); - subreq->len = subreq->consumed; - subreq->transferred = subreq->consumed; - goto remove_subreq_locked; - -remove_subreq: - spin_lock_bh(&rreq->lock); -remove_subreq_locked: - subreq->consumed = subreq->len; - list_del(&subreq->rreq_link); - spin_unlock_bh(&rreq->lock); - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed); - return true; - -bad: - /* Errr... prev and next both donated to us, but insufficient to finish - * the folio. + +out: + _leave(" = %x", notes); + return; + +need_retry: + /* Okay... We're going to have to retry parts of the stream. Note + * that any partially completed op will have had any wholly transferred + * folios removed from it. */ - printk("R=%08x[%x] s=%llx-%llx %zx/%zx/%zx\n", - rreq->debug_id, subreq->debug_index, - subreq->start, subreq->start + subreq->transferred - 1, - subreq->consumed, subreq->transferred, subreq->len); - printk("folio: %llx-%llx\n", fpos, fend - 1); - printk("donated: prev=%zx next=%zx\n", prev_donated, next_donated); - printk("s=%llx av=%zx part=%zx\n", start, avail, part); - BUG(); + _debug("retry"); + netfs_retry_reads(rreq); + goto out; } /* @@ -321,12 +343,13 @@ bad: static void netfs_rreq_assess_dio(struct netfs_io_request *rreq) { struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; unsigned int i; /* Collect unbuffered reads and direct reads, adding up the transfer * sizes until we find the first short or failed subrequest. */ - list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { rreq->transferred += subreq->transferred; if (subreq->transferred < subreq->len || @@ -359,25 +382,67 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq) } /* - * Assess the state of a read request and decide what to do next. + * Do processing after reading a monolithic single object. + */ +static void netfs_rreq_assess_single(struct netfs_io_request *rreq) +{ + struct netfs_io_stream *stream = &rreq->io_streams[0]; + + if (!rreq->error && stream->source == NETFS_DOWNLOAD_FROM_SERVER && + fscache_resources_valid(&rreq->cache_resources)) { + trace_netfs_rreq(rreq, netfs_rreq_trace_dirty); + netfs_single_mark_inode_dirty(rreq->inode); + } + + if (rreq->iocb) { + rreq->iocb->ki_pos += rreq->transferred; + if (rreq->iocb->ki_complete) + rreq->iocb->ki_complete( + rreq->iocb, rreq->error ? 
rreq->error : rreq->transferred); + } + if (rreq->netfs_ops->done) + rreq->netfs_ops->done(rreq); +} + +/* + * Perform the collection of subrequests and folios. * * Note that we're in normal kernel thread context at this point, possibly * running on a workqueue. */ -static void netfs_rreq_assess(struct netfs_io_request *rreq) +static void netfs_read_collection(struct netfs_io_request *rreq) { - trace_netfs_rreq(rreq, netfs_rreq_trace_assess); + struct netfs_io_stream *stream = &rreq->io_streams[0]; - //netfs_rreq_is_still_valid(rreq); + netfs_collect_read_results(rreq); - if (test_and_clear_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags)) { - netfs_retry_reads(rreq); + /* We're done when the app thread has finished posting subreqs and the + * queue is empty. + */ + if (!test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags)) + return; + smp_rmb(); /* Read ALL_QUEUED before subreq lists. */ + + if (!list_empty(&stream->subrequests)) return; - } - if (rreq->origin == NETFS_DIO_READ || - rreq->origin == NETFS_READ_GAPS) + /* Okay, declare that all I/O is complete. */ + rreq->transferred = stream->transferred; + trace_netfs_rreq(rreq, netfs_rreq_trace_complete); + + //netfs_rreq_is_still_valid(rreq); + + switch (rreq->origin) { + case NETFS_DIO_READ: + case NETFS_READ_GAPS: netfs_rreq_assess_dio(rreq); + break; + case NETFS_READ_SINGLE: + netfs_rreq_assess_single(rreq); + break; + default: + break; + } task_io_account_read(rreq->transferred); trace_netfs_rreq(rreq, netfs_rreq_trace_wake_ip); @@ -386,61 +451,66 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq) trace_netfs_rreq(rreq, netfs_rreq_trace_done); netfs_clear_subrequests(rreq, false); netfs_unlock_abandoned_read_pages(rreq); - if (unlikely(test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))) - netfs_pgpriv2_write_to_the_cache(rreq); + if (unlikely(rreq->copy_to_cache)) + netfs_pgpriv2_end_copy_to_cache(rreq); } -void netfs_read_termination_worker(struct work_struct *work) +void netfs_read_collection_worker(struct work_struct *work) { - struct netfs_io_request *rreq = - container_of(work, struct netfs_io_request, work); + struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); + netfs_see_request(rreq, netfs_rreq_trace_see_work); - netfs_rreq_assess(rreq); - netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete); + if (test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) + netfs_read_collection(rreq); + netfs_put_request(rreq, false, netfs_rreq_trace_put_work); } /* - * Handle the completion of all outstanding I/O operations on a read request. - * We inherit a ref from the caller. + * Wake the collection work item. */ -void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async) +void netfs_wake_read_collector(struct netfs_io_request *rreq) { - if (!was_async) - return netfs_rreq_assess(rreq); - if (!work_pending(&rreq->work)) { - netfs_get_request(rreq, netfs_rreq_trace_get_work); - if (!queue_work(system_unbound_wq, &rreq->work)) - netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq); + if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) { + if (!work_pending(&rreq->work)) { + netfs_get_request(rreq, netfs_rreq_trace_get_work); + if (!queue_work(system_unbound_wq, &rreq->work)) + netfs_put_request(rreq, true, netfs_rreq_trace_put_work_nq); + } + } else { + trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue); + wake_up(&rreq->waitq); } } /** * netfs_read_subreq_progress - Note progress of a read operation. * @subreq: The read request that has terminated. 
- * @was_async: True if we're in an asynchronous context. * * This tells the read side of netfs lib that a contributory I/O operation has * made some progress and that it may be possible to unlock some folios. * * Before calling, the filesystem should update subreq->transferred to track * the amount of data copied into the output buffer. - * - * If @was_async is true, the caller might be running in softirq or interrupt - * context and we can't sleep. */ -void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, - bool was_async) +void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; + size_t fsize = PAGE_SIZE << rreq->front_folio_order; trace_netfs_sreq(subreq, netfs_sreq_trace_progress); - if (subreq->transferred > subreq->consumed && + /* If we are at the head of the queue, wake up the collector, + * getting a ref to it if we were the ones to do so. + */ + if (subreq->start + subreq->transferred > rreq->cleaned_to + fsize && (rreq->origin == NETFS_READAHEAD || rreq->origin == NETFS_READPAGE || - rreq->origin == NETFS_READ_FOR_WRITE)) { - netfs_consume_read_data(subreq, was_async); + rreq->origin == NETFS_READ_FOR_WRITE) && + list_is_first(&subreq->rreq_link, &stream->subrequests) + ) { __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); + netfs_wake_read_collector(rreq); } } EXPORT_SYMBOL(netfs_read_subreq_progress); @@ -448,27 +518,23 @@ EXPORT_SYMBOL(netfs_read_subreq_progress); /** * netfs_read_subreq_terminated - Note the termination of an I/O operation. * @subreq: The I/O request that has terminated. - * @error: Error code indicating type of completion. - * @was_async: The termination was asynchronous * * This tells the read helper that a contributory I/O operation has terminated, * one way or another, and that it should integrate the results. * - * The caller indicates the outcome of the operation through @error, supplying - * 0 to indicate a successful or retryable transfer (if NETFS_SREQ_NEED_RETRY - * is set) or a negative error code. The helper will look after reissuing I/O - * operations as appropriate and writing downloaded data to the cache. + * The caller indicates the outcome of the operation through @subreq->error, + * supplying 0 to indicate a successful or retryable transfer (if + * NETFS_SREQ_NEED_RETRY is set) or a negative error code. The helper will + * look after reissuing I/O operations as appropriate and writing downloaded + * data to the cache. * * Before calling, the filesystem should update subreq->transferred to track * the amount of data copied into the output buffer. - * - * If @was_async is true, the caller might be running in softirq or interrupt - * context and we can't sleep. */ -void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, - int error, bool was_async) +void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; switch (subreq->source) { case NETFS_READ_FROM_CACHE: @@ -481,71 +547,156 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, break; } - if (rreq->origin != NETFS_DIO_READ) { - /* Collect buffered reads. - * - * If the read completed validly short, then we can clear the - * tail before going on to unlock the folios. 
- */ - if (error == 0 && subreq->transferred < subreq->len && - (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags) || - test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags))) { - netfs_clear_unread(subreq); - subreq->transferred = subreq->len; - trace_netfs_sreq(subreq, netfs_sreq_trace_clear); - } - if (subreq->transferred > subreq->consumed && - (rreq->origin == NETFS_READAHEAD || - rreq->origin == NETFS_READPAGE || - rreq->origin == NETFS_READ_FOR_WRITE)) { - netfs_consume_read_data(subreq, was_async); - __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); - } - rreq->transferred += subreq->transferred; - } - /* Deal with retry requests, short reads and errors. If we retry * but don't make progress, we abandon the attempt. */ - if (!error && subreq->transferred < subreq->len) { + if (!subreq->error && subreq->transferred < subreq->len) { if (test_bit(NETFS_SREQ_HIT_EOF, &subreq->flags)) { trace_netfs_sreq(subreq, netfs_sreq_trace_hit_eof); + } else if (test_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags)) { + trace_netfs_sreq(subreq, netfs_sreq_trace_need_clear); + } else if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { + trace_netfs_sreq(subreq, netfs_sreq_trace_need_retry); + } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) { + __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + trace_netfs_sreq(subreq, netfs_sreq_trace_partial_read); } else { + __set_bit(NETFS_SREQ_FAILED, &subreq->flags); + subreq->error = -ENODATA; trace_netfs_sreq(subreq, netfs_sreq_trace_short); - if (subreq->transferred > subreq->consumed) { - /* If we didn't read new data, abandon retry. */ - if (subreq->retry_count && - test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) { - __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); - set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags); - } - } else if (test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags)) { - __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); - set_bit(NETFS_RREQ_NEED_RETRY, &rreq->flags); - } else { - __set_bit(NETFS_SREQ_FAILED, &subreq->flags); - error = -ENODATA; - } } } - subreq->error = error; - trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); - - if (unlikely(error < 0)) { - trace_netfs_failure(rreq, subreq, error, netfs_fail_read); + if (unlikely(subreq->error < 0)) { + trace_netfs_failure(rreq, subreq, subreq->error, netfs_fail_read); if (subreq->source == NETFS_READ_FROM_CACHE) { netfs_stat(&netfs_n_rh_read_failed); + __set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); } else { netfs_stat(&netfs_n_rh_download_failed); - set_bit(NETFS_RREQ_FAILED, &rreq->flags); - rreq->error = subreq->error; + __set_bit(NETFS_SREQ_FAILED, &subreq->flags); } + trace_netfs_rreq(rreq, netfs_rreq_trace_set_pause); + set_bit(NETFS_RREQ_PAUSE, &rreq->flags); } - if (atomic_dec_and_test(&rreq->nr_outstanding)) - netfs_rreq_terminated(rreq, was_async); + trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); + + clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */ + + /* If we are at the head of the queue, wake up the collector. */ + if (list_is_first(&subreq->rreq_link, &stream->subrequests)) + netfs_wake_read_collector(rreq); - netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); + netfs_put_subrequest(subreq, true, netfs_sreq_trace_put_terminated); } EXPORT_SYMBOL(netfs_read_subreq_terminated); + +/* + * Handle termination of a read from the cache. 
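[Editor's illustration — not part of the patch] To make the revised completion contract above concrete: the filesystem now records the outcome in subreq->error and updates subreq->transferred itself, then calls netfs_read_subreq_terminated() with no error or was_async arguments. A minimal sketch of such a completion callback is below; the function and parameter names are hypothetical.

#include <linux/netfs.h>

/* Hypothetical read-completion path in a network filesystem. */
static void example_issue_read_done(struct netfs_io_subrequest *subreq,
				    ssize_t transferred_or_error)
{
	if (transferred_or_error < 0) {
		subreq->error = transferred_or_error;
	} else {
		if (transferred_or_error > 0) {
			subreq->transferred += transferred_or_error;
			__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
		}
		subreq->error = 0;
	}
	netfs_read_subreq_terminated(subreq);
}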
+ */ +void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async) +{ + struct netfs_io_subrequest *subreq = priv; + + if (transferred_or_error > 0) { + subreq->error = 0; + if (transferred_or_error > 0) { + subreq->transferred += transferred_or_error; + __set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); + } + } else { + subreq->error = transferred_or_error; + } + netfs_read_subreq_terminated(subreq); +} + +/* + * Wait for the read operation to complete, successfully or otherwise. + */ +ssize_t netfs_wait_for_read(struct netfs_io_request *rreq) +{ + struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; + DEFINE_WAIT(myself); + ssize_t ret; + + for (;;) { + trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue); + prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); + + subreq = list_first_entry_or_null(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); + if (subreq && + (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) || + test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) { + __set_current_state(TASK_RUNNING); + netfs_read_collection(rreq); + continue; + } + + if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags)) + break; + + schedule(); + trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue); + } + + finish_wait(&rreq->waitq, &myself); + + ret = rreq->error; + if (ret == 0) { + ret = rreq->transferred; + switch (rreq->origin) { + case NETFS_DIO_READ: + case NETFS_READ_SINGLE: + ret = rreq->transferred; + break; + default: + if (rreq->submitted < rreq->len) { + trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); + ret = -EIO; + } + break; + } + } + + return ret; +} + +/* + * Wait for a paused read operation to unpause or complete in some manner. + */ +void netfs_wait_for_pause(struct netfs_io_request *rreq) +{ + struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; + DEFINE_WAIT(myself); + + trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause); + + for (;;) { + trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue); + prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); + + subreq = list_first_entry_or_null(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); + if (subreq && + (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) || + test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) { + __set_current_state(TASK_RUNNING); + netfs_read_collection(rreq); + continue; + } + + if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) || + !test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) + break; + + schedule(); + trace_netfs_rreq(rreq, netfs_rreq_trace_woke_queue); + } + + finish_wait(&rreq->waitq, &myself); +} diff --git a/fs/netfs/read_pgpriv2.c b/fs/netfs/read_pgpriv2.c index 54d5004fec18..cf7727060215 100644 --- a/fs/netfs/read_pgpriv2.c +++ b/fs/netfs/read_pgpriv2.c @@ -14,52 +14,11 @@ #include "internal.h" /* - * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The - * third mark in the folio queue is used to indicate that this folio needs - * writing. - */ -void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq, - struct netfs_io_request *rreq, - struct folio_queue *folioq, - int slot) -{ - struct folio *folio = folioq_folio(folioq, slot); - - trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); - folio_start_private_2(folio); - folioq_mark3(folioq, slot); -} - -/* - * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an - * unrecoverable error. 
- */ -static void netfs_pgpriv2_cancel(struct folio_queue *folioq) -{ - struct folio *folio; - int slot; - - while (folioq) { - if (!folioq->marks3) { - folioq = folioq->next; - continue; - } - - slot = __ffs(folioq->marks3); - folio = folioq_folio(folioq, slot); - - trace_netfs_folio(folio, netfs_folio_trace_cancel_copy); - folio_end_private_2(folio); - folioq_unmark3(folioq, slot); - } -} - -/* * [DEPRECATED] Copy a folio to the cache with PG_private_2 set. */ -static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio) +static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio) { - struct netfs_io_stream *cache = &wreq->io_streams[1]; + struct netfs_io_stream *cache = &creq->io_streams[1]; size_t fsize = folio_size(folio), flen = fsize; loff_t fpos = folio_pos(folio), i_size; bool to_eof = false; @@ -70,17 +29,17 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio * of the page to beyond it, but cannot move i_size into or through the * page since we have it locked. */ - i_size = i_size_read(wreq->inode); + i_size = i_size_read(creq->inode); if (fpos >= i_size) { /* mmap beyond eof. */ _debug("beyond eof"); folio_end_private_2(folio); - return 0; + return; } - if (fpos + fsize > wreq->i_size) - wreq->i_size = i_size; + if (fpos + fsize > creq->i_size) + creq->i_size = i_size; if (flen > i_size - fpos) { flen = i_size - fpos; @@ -94,8 +53,10 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio trace_netfs_folio(folio, netfs_folio_trace_store_copy); /* Attach the folio to the rolling buffer. */ - if (netfs_buffer_append_folio(wreq, folio, false) < 0) - return -ENOMEM; + if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) { + clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags); + return; + } cache->submit_extendable_to = fsize; cache->submit_off = 0; @@ -109,11 +70,11 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio do { ssize_t part; - wreq->io_iter.iov_offset = cache->submit_off; + creq->buffer.iter.iov_offset = cache->submit_off; - atomic64_set(&wreq->issued_to, fpos + cache->submit_off); + atomic64_set(&creq->issued_to, fpos + cache->submit_off); cache->submit_extendable_to = fsize - cache->submit_off; - part = netfs_advance_write(wreq, cache, fpos + cache->submit_off, + part = netfs_advance_write(creq, cache, fpos + cache->submit_off, cache->submit_len, to_eof); cache->submit_off += part; if (part > cache->submit_len) @@ -122,98 +83,95 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio cache->submit_len -= part; } while (cache->submit_len > 0); - wreq->io_iter.iov_offset = 0; - iov_iter_advance(&wreq->io_iter, fsize); - atomic64_set(&wreq->issued_to, fpos + fsize); + creq->buffer.iter.iov_offset = 0; + rolling_buffer_advance(&creq->buffer, fsize); + atomic64_set(&creq->issued_to, fpos + fsize); if (flen < fsize) - netfs_issue_write(wreq, cache); - - _leave(" = 0"); - return 0; + netfs_issue_write(creq, cache); } /* - * [DEPRECATED] Go through the buffer and write any folios that are marked with - * the third mark to the cache. + * [DEPRECATED] Set up copying to the cache. 
*/ -void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq) +static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache( + struct netfs_io_request *rreq, struct folio *folio) { - struct netfs_io_request *wreq; - struct folio_queue *folioq; - struct folio *folio; - int error = 0; - int slot = 0; - - _enter(""); + struct netfs_io_request *creq; if (!fscache_resources_valid(&rreq->cache_resources)) - goto couldnt_start; + goto cancel; - /* Need the first folio to be able to set up the op. */ - for (folioq = rreq->buffer; folioq; folioq = folioq->next) { - if (folioq->marks3) { - slot = __ffs(folioq->marks3); - break; - } - } - if (!folioq) - return; - folio = folioq_folio(folioq, slot); - - wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio), + creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio), NETFS_PGPRIV2_COPY_TO_CACHE); - if (IS_ERR(wreq)) { - kleave(" [create %ld]", PTR_ERR(wreq)); - goto couldnt_start; - } + if (IS_ERR(creq)) + goto cancel; - trace_netfs_write(wreq, netfs_write_trace_copy_to_cache); + if (!creq->io_streams[1].avail) + goto cancel_put; + + trace_netfs_write(creq, netfs_write_trace_copy_to_cache); netfs_stat(&netfs_n_wh_copy_to_cache); - if (!wreq->io_streams[1].avail) { - netfs_put_request(wreq, false, netfs_rreq_trace_put_return); - goto couldnt_start; - } + rreq->copy_to_cache = creq; + return creq; + +cancel_put: + netfs_put_request(creq, false, netfs_rreq_trace_put_return); +cancel: + rreq->copy_to_cache = ERR_PTR(-ENOBUFS); + clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags); + return ERR_PTR(-ENOBUFS); +} - for (;;) { - error = netfs_pgpriv2_copy_folio(wreq, folio); - if (error < 0) - break; +/* + * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add + * it to the copy write request. + */ +void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio) +{ + struct netfs_io_request *creq = rreq->copy_to_cache; - folioq_unmark3(folioq, slot); - if (!folioq->marks3) { - folioq = folioq->next; - if (!folioq) - break; - } + if (!creq) + creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio); + if (IS_ERR(creq)) + return; - slot = __ffs(folioq->marks3); - folio = folioq_folio(folioq, slot); - } + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); + folio_start_private_2(folio); + netfs_pgpriv2_copy_folio(creq, folio); +} - netfs_issue_write(wreq, &wreq->io_streams[1]); +/* + * [DEPRECATED] End writing to the cache, flushing out any outstanding writes. + */ +void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq) +{ + struct netfs_io_request *creq = rreq->copy_to_cache; + + if (IS_ERR_OR_NULL(creq)) + return; + + netfs_issue_write(creq, &creq->io_streams[1]); smp_wmb(); /* Write lists before ALL_QUEUED. */ - set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); + set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags); - netfs_put_request(wreq, false, netfs_rreq_trace_put_return); - _leave(" = %d", error); -couldnt_start: - netfs_pgpriv2_cancel(rreq->buffer); + netfs_put_request(creq, false, netfs_rreq_trace_put_return); + creq->copy_to_cache = NULL; } /* * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished * copying. 
*/ -bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq) +bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq) { - struct folio_queue *folioq = wreq->buffer; - unsigned long long collected_to = wreq->collected_to; - unsigned int slot = wreq->buffer_head_slot; + struct folio_queue *folioq = creq->buffer.tail; + unsigned long long collected_to = creq->collected_to; + unsigned int slot = creq->buffer.first_tail_slot; bool made_progress = false; if (slot >= folioq_nr_slots(folioq)) { - folioq = netfs_delete_buffer_head(wreq); + folioq = rolling_buffer_delete_spent(&creq->buffer); slot = 0; } @@ -225,16 +183,16 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq) folio = folioq_folio(folioq, slot); if (WARN_ONCE(!folio_test_private_2(folio), "R=%08x: folio %lx is not marked private_2\n", - wreq->debug_id, folio->index)) + creq->debug_id, folio->index)) trace_netfs_folio(folio, netfs_folio_trace_not_under_wback); fpos = folio_pos(folio); fsize = folio_size(folio); flen = fsize; - fend = min_t(unsigned long long, fpos + flen, wreq->i_size); + fend = min_t(unsigned long long, fpos + flen, creq->i_size); - trace_netfs_collect_folio(wreq, folio, fend, collected_to); + trace_netfs_collect_folio(creq, folio, fend, collected_to); /* Unlock any folio we've transferred all of. */ if (collected_to < fend) @@ -242,7 +200,7 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq) trace_netfs_folio(folio, netfs_folio_trace_end_copy); folio_end_private_2(folio); - wreq->cleaned_to = fpos + fsize; + creq->cleaned_to = fpos + fsize; made_progress = true; /* Clean up the head folioq. If we clear an entire folioq, then @@ -252,9 +210,9 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq) folioq_clear(folioq, slot); slot++; if (slot >= folioq_nr_slots(folioq)) { - if (READ_ONCE(wreq->buffer_tail) == folioq) - break; - folioq = netfs_delete_buffer_head(wreq); + folioq = rolling_buffer_delete_spent(&creq->buffer); + if (!folioq) + goto done; slot = 0; } @@ -262,7 +220,8 @@ bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq) break; } - wreq->buffer = folioq; - wreq->buffer_head_slot = slot; + creq->buffer.tail = folioq; +done: + creq->buffer.first_tail_slot = slot; return made_progress; } diff --git a/fs/netfs/read_retry.c b/fs/netfs/read_retry.c index 16b676c68dcd..2290af0d51ac 100644 --- a/fs/netfs/read_retry.c +++ b/fs/netfs/read_retry.c @@ -12,15 +12,7 @@ static void netfs_reissue_read(struct netfs_io_request *rreq, struct netfs_io_subrequest *subreq) { - struct iov_iter *io_iter = &subreq->io_iter; - - if (iov_iter_is_folioq(io_iter)) { - subreq->curr_folioq = (struct folio_queue *)io_iter->folioq; - subreq->curr_folioq_slot = io_iter->folioq_slot; - subreq->curr_folio_order = subreq->curr_folioq->orders[subreq->curr_folioq_slot]; - } - - atomic_inc(&rreq->nr_outstanding); + __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); subreq->rreq->netfs_ops->issue_read(subreq); @@ -33,13 +25,12 @@ static void netfs_reissue_read(struct netfs_io_request *rreq, static void netfs_retry_read_subrequests(struct netfs_io_request *rreq) { struct netfs_io_subrequest *subreq; - struct netfs_io_stream *stream0 = &rreq->io_streams[0]; - LIST_HEAD(sublist); - LIST_HEAD(queue); + struct netfs_io_stream *stream = &rreq->io_streams[0]; + struct list_head *next; _enter("R=%x", rreq->debug_id); - if 
(list_empty(&rreq->subrequests)) + if (list_empty(&stream->subrequests)) return; if (rreq->netfs_ops->retry_request) @@ -50,9 +41,7 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq) */ if (!rreq->netfs_ops->prepare_read && !rreq->cache_resources.ops) { - struct netfs_io_subrequest *subreq; - - list_for_each_entry(subreq, &rreq->subrequests, rreq_link) { + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) break; if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { @@ -75,48 +64,44 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq) * populating with smaller subrequests. In the event that the subreq * we just launched finishes before we insert the next subreq, it'll * fill in rreq->prev_donated instead. - + * * Note: Alternatively, we could split the tail subrequest right before * we reissue it and fix up the donations under lock. */ - list_splice_init(&rreq->subrequests, &queue); + next = stream->subrequests.next; do { - struct netfs_io_subrequest *from; + struct netfs_io_subrequest *from, *to, *tmp; struct iov_iter source; unsigned long long start, len; - size_t part, deferred_next_donated = 0; + size_t part; bool boundary = false; /* Go through the subreqs and find the next span of contiguous * buffer that we then rejig (cifs, for example, needs the * rsize renegotiating) and reissue. */ - from = list_first_entry(&queue, struct netfs_io_subrequest, rreq_link); - list_move_tail(&from->rreq_link, &sublist); + from = list_entry(next, struct netfs_io_subrequest, rreq_link); + to = from; start = from->start + from->transferred; len = from->len - from->transferred; - _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx/%zx", + _debug("from R=%08x[%x] s=%llx ctl=%zx/%zx", rreq->debug_id, from->debug_index, - from->start, from->consumed, from->transferred, from->len); + from->start, from->transferred, from->len); if (test_bit(NETFS_SREQ_FAILED, &from->flags) || !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) goto abandon; - deferred_next_donated = from->next_donated; - while ((subreq = list_first_entry_or_null( - &queue, struct netfs_io_subrequest, rreq_link))) { - if (subreq->start != start + len || - subreq->transferred > 0 || + list_for_each_continue(next, &stream->subrequests) { + subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); + if (subreq->start + subreq->transferred != start + len || + test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) break; - list_move_tail(&subreq->rreq_link, &sublist); - len += subreq->len; - deferred_next_donated = subreq->next_donated; - if (test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags)) - break; + to = subreq; + len += to->len; } _debug(" - range: %llx-%llx %llx", start, start + len - 1, len); @@ -129,38 +114,31 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq) source.count = len; /* Work through the sublist. 
*/ - while ((subreq = list_first_entry_or_null( - &sublist, struct netfs_io_subrequest, rreq_link))) { - list_del(&subreq->rreq_link); - + subreq = from; + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { + if (!len) + break; subreq->source = NETFS_DOWNLOAD_FROM_SERVER; subreq->start = start - subreq->transferred; subreq->len = len + subreq->transferred; - stream0->sreq_max_len = subreq->len; - __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags); subreq->retry_count++; - spin_lock_bh(&rreq->lock); - list_add_tail(&subreq->rreq_link, &rreq->subrequests); - subreq->prev_donated += rreq->prev_donated; - rreq->prev_donated = 0; trace_netfs_sreq(subreq, netfs_sreq_trace_retry); - spin_unlock_bh(&rreq->lock); - - BUG_ON(!len); /* Renegotiate max_len (rsize) */ + stream->sreq_max_len = subreq->len; if (rreq->netfs_ops->prepare_read && rreq->netfs_ops->prepare_read(subreq) < 0) { trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed); __set_bit(NETFS_SREQ_FAILED, &subreq->flags); + goto abandon; } - part = umin(len, stream0->sreq_max_len); - if (unlikely(rreq->io_streams[0].sreq_max_segs)) - part = netfs_limit_iter(&source, 0, part, stream0->sreq_max_segs); + part = umin(len, stream->sreq_max_len); + if (unlikely(stream->sreq_max_segs)) + part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs); subreq->len = subreq->transferred + part; subreq->io_iter = source; iov_iter_truncate(&subreq->io_iter, part); @@ -170,57 +148,105 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq) if (!len) { if (boundary) __set_bit(NETFS_SREQ_BOUNDARY, &subreq->flags); - subreq->next_donated = deferred_next_donated; } else { __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags); - subreq->next_donated = 0; } + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); netfs_reissue_read(rreq, subreq); - if (!len) + if (subreq == to) break; - - /* If we ran out of subrequests, allocate another. */ - if (list_empty(&sublist)) { - subreq = netfs_alloc_subrequest(rreq); - if (!subreq) - goto abandon; - subreq->source = NETFS_DOWNLOAD_FROM_SERVER; - subreq->start = start; - - /* We get two refs, but need just one. */ - netfs_put_subrequest(subreq, false, netfs_sreq_trace_new); - trace_netfs_sreq(subreq, netfs_sreq_trace_split); - list_add_tail(&subreq->rreq_link, &sublist); - } } /* If we managed to use fewer subreqs, we can discard the - * excess. + * excess; if we used the same number, then we're done. */ - while ((subreq = list_first_entry_or_null( - &sublist, struct netfs_io_subrequest, rreq_link))) { - trace_netfs_sreq(subreq, netfs_sreq_trace_discard); - list_del(&subreq->rreq_link); - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); + if (!len) { + if (subreq == to) + continue; + list_for_each_entry_safe_from(subreq, tmp, + &stream->subrequests, rreq_link) { + trace_netfs_sreq(subreq, netfs_sreq_trace_discard); + list_del(&subreq->rreq_link); + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); + if (subreq == to) + break; + } + continue; } - } while (!list_empty(&queue)); + /* We ran out of subrequests, so we need to allocate some more + * and insert them after. 
+ */ + do { + subreq = netfs_alloc_subrequest(rreq); + if (!subreq) { + subreq = to; + goto abandon_after; + } + subreq->source = NETFS_DOWNLOAD_FROM_SERVER; + subreq->start = start; + subreq->len = len; + subreq->debug_index = atomic_inc_return(&rreq->subreq_counter); + subreq->stream_nr = stream->stream_nr; + subreq->retry_count = 1; + + trace_netfs_sreq_ref(rreq->debug_id, subreq->debug_index, + refcount_read(&subreq->ref), + netfs_sreq_trace_new); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + + list_add(&subreq->rreq_link, &to->rreq_link); + to = list_next_entry(to, rreq_link); + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + + stream->sreq_max_len = umin(len, rreq->rsize); + stream->sreq_max_segs = 0; + if (unlikely(stream->sreq_max_segs)) + part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs); + + netfs_stat(&netfs_n_rh_download); + if (rreq->netfs_ops->prepare_read(subreq) < 0) { + trace_netfs_sreq(subreq, netfs_sreq_trace_reprep_failed); + __set_bit(NETFS_SREQ_FAILED, &subreq->flags); + goto abandon; + } + + part = umin(len, stream->sreq_max_len); + subreq->len = subreq->transferred + part; + subreq->io_iter = source; + iov_iter_truncate(&subreq->io_iter, part); + iov_iter_advance(&source, part); + + len -= part; + start += part; + if (!len && boundary) { + __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); + boundary = false; + } + + netfs_reissue_read(rreq, subreq); + } while (len); + + } while (!list_is_head(next, &stream->subrequests)); return; - /* If we hit ENOMEM, fail all remaining subrequests */ + /* If we hit an error, fail all remaining incomplete subrequests */ +abandon_after: + if (list_is_last(&subreq->rreq_link, &stream->subrequests)) + return; + subreq = list_next_entry(subreq, rreq_link); abandon: - list_splice_init(&sublist, &queue); - list_for_each_entry(subreq, &queue, rreq_link) { - if (!subreq->error) - subreq->error = -ENOMEM; - __clear_bit(NETFS_SREQ_FAILED, &subreq->flags); + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { + if (!subreq->error && + !test_bit(NETFS_SREQ_FAILED, &subreq->flags) && + !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) + continue; + subreq->error = -ENOMEM; + __set_bit(NETFS_SREQ_FAILED, &subreq->flags); __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); } - spin_lock_bh(&rreq->lock); - list_splice_tail_init(&queue, &rreq->subrequests); - spin_unlock_bh(&rreq->lock); } /* @@ -228,14 +254,19 @@ abandon: */ void netfs_retry_reads(struct netfs_io_request *rreq) { - trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit); + struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream = &rreq->io_streams[0]; - atomic_inc(&rreq->nr_outstanding); + /* Wait for all outstanding I/O to quiesce before performing retries as + * we may need to renegotiate the I/O sizes. 
+ */ + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { + wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, + TASK_UNINTERRUPTIBLE); + } + trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit); netfs_retry_read_subrequests(rreq); - - if (atomic_dec_and_test(&rreq->nr_outstanding)) - netfs_rreq_terminated(rreq, false); } /* @@ -246,7 +277,7 @@ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq) { struct folio_queue *p; - for (p = rreq->buffer; p; p = p->next) { + for (p = rreq->buffer.tail; p; p = p->next) { for (int slot = 0; slot < folioq_count(p); slot++) { struct folio *folio = folioq_folio(p, slot); diff --git a/fs/netfs/read_single.c b/fs/netfs/read_single.c new file mode 100644 index 000000000000..fea0ecdecc53 --- /dev/null +++ b/fs/netfs/read_single.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Single, monolithic object support (e.g. AFS directory). + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/export.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include <linux/uio.h> +#include <linux/sched/mm.h> +#include <linux/task_io_accounting_ops.h> +#include <linux/netfs.h> +#include "internal.h" + +/** + * netfs_single_mark_inode_dirty - Mark a single, monolithic object inode dirty + * @inode: The inode to mark + * + * Mark an inode that contains a single, monolithic object as dirty so that its + * writepages op will get called. If set, the SINGLE_NO_UPLOAD flag indicates + * that the object will only be written to the cache and not uploaded (e.g. AFS + * directory contents). + */ +void netfs_single_mark_inode_dirty(struct inode *inode) +{ + struct netfs_inode *ictx = netfs_inode(inode); + bool cache_only = test_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &ictx->flags); + bool caching = fscache_cookie_enabled(netfs_i_cookie(netfs_inode(inode))); + + if (cache_only && !caching) + return; + + mark_inode_dirty(inode); + + if (caching && !(inode->i_state & I_PINNING_NETFS_WB)) { + bool need_use = false; + + spin_lock(&inode->i_lock); + if (!(inode->i_state & I_PINNING_NETFS_WB)) { + inode->i_state |= I_PINNING_NETFS_WB; + need_use = true; + } + spin_unlock(&inode->i_lock); + + if (need_use) + fscache_use_cookie(netfs_i_cookie(ictx), true); + } + +} +EXPORT_SYMBOL(netfs_single_mark_inode_dirty); + +static int netfs_single_begin_cache_read(struct netfs_io_request *rreq, struct netfs_inode *ctx) +{ + return fscache_begin_read_operation(&rreq->cache_resources, netfs_i_cookie(ctx)); +} + +static void netfs_single_cache_prepare_read(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) +{ + struct netfs_cache_resources *cres = &rreq->cache_resources; + + if (!cres->ops) { + subreq->source = NETFS_DOWNLOAD_FROM_SERVER; + return; + } + subreq->source = cres->ops->prepare_read(subreq, rreq->i_size); + trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); + +} + +static void netfs_single_read_cache(struct netfs_io_request *rreq, + struct netfs_io_subrequest *subreq) +{ + struct netfs_cache_resources *cres = &rreq->cache_resources; + + _enter("R=%08x[%x]", rreq->debug_id, subreq->debug_index); + netfs_stat(&netfs_n_rh_read); + cres->ops->read(cres, subreq->start, &subreq->io_iter, NETFS_READ_HOLE_FAIL, + netfs_cache_read_terminated, subreq); +} + +/* + * Perform a read to a buffer from the cache or the server. Only a single + * subreq is permitted as the object must be fetched in a single transaction. 
+ */ +static int netfs_single_dispatch_read(struct netfs_io_request *rreq) +{ + struct netfs_io_stream *stream = &rreq->io_streams[0]; + struct netfs_io_subrequest *subreq; + int ret = 0; + + subreq = netfs_alloc_subrequest(rreq); + if (!subreq) + return -ENOMEM; + + subreq->source = NETFS_SOURCE_UNKNOWN; + subreq->start = 0; + subreq->len = rreq->len; + subreq->io_iter = rreq->buffer.iter; + + __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + + spin_lock(&rreq->lock); + list_add_tail(&subreq->rreq_link, &stream->subrequests); + trace_netfs_sreq(subreq, netfs_sreq_trace_added); + stream->front = subreq; + /* Store list pointers before active flag */ + smp_store_release(&stream->active, true); + spin_unlock(&rreq->lock); + + netfs_single_cache_prepare_read(rreq, subreq); + switch (subreq->source) { + case NETFS_DOWNLOAD_FROM_SERVER: + netfs_stat(&netfs_n_rh_download); + if (rreq->netfs_ops->prepare_read) { + ret = rreq->netfs_ops->prepare_read(subreq); + if (ret < 0) + goto cancel; + } + + rreq->netfs_ops->issue_read(subreq); + rreq->submitted += subreq->len; + break; + case NETFS_READ_FROM_CACHE: + trace_netfs_sreq(subreq, netfs_sreq_trace_submit); + netfs_single_read_cache(rreq, subreq); + rreq->submitted += subreq->len; + ret = 0; + break; + default: + pr_warn("Unexpected single-read source %u\n", subreq->source); + WARN_ON_ONCE(true); + ret = -EIO; + break; + } + + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags); + return ret; +cancel: + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_cancel); + return ret; +} + +/** + * netfs_read_single - Synchronously read a single blob of pages. + * @inode: The inode to read from. + * @file: The file we're using to read or NULL. + * @iter: The buffer we're reading into. + * + * Fulfil a read request for a single monolithic object by drawing data from + * the cache if possible, or the netfs if not. The buffer may be larger than + * the file content; unused beyond the EOF will be zero-filled. The content + * will be read with a single I/O request (though this may be retried). + * + * The calling netfs must initialise a netfs context contiguous to the vfs + * inode before calling this. + * + * This is usable whether or not caching is enabled. If caching is enabled, + * the data will be stored as a single object into the cache. 
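(Illustrative aside, not part of the diff: a minimal caller sketch for netfs_read_single() as documented above. The folio_queue covering the object (obj_fq), its size (obj_size) and my_fetch_whole_object() are assumptions for illustration.)

static ssize_t my_fetch_whole_object(struct inode *inode, struct file *file,
                                     struct folio_queue *obj_fq, size_t obj_size)
{
        struct iov_iter iter;

        /* Aim a destination iterator at the preallocated buffer... */
        iov_iter_folio_queue(&iter, ITER_DEST, obj_fq, 0, 0, obj_size);

        /* ...and let netfslib fill it from the cache or the server. */
        return netfs_read_single(inode, file, &iter);
}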
+ */ +ssize_t netfs_read_single(struct inode *inode, struct file *file, struct iov_iter *iter) +{ + struct netfs_io_request *rreq; + struct netfs_inode *ictx = netfs_inode(inode); + ssize_t ret; + + rreq = netfs_alloc_request(inode->i_mapping, file, 0, iov_iter_count(iter), + NETFS_READ_SINGLE); + if (IS_ERR(rreq)) + return PTR_ERR(rreq); + + ret = netfs_single_begin_cache_read(rreq, ictx); + if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) + goto cleanup_free; + + netfs_stat(&netfs_n_rh_read_single); + trace_netfs_read(rreq, 0, rreq->len, netfs_read_trace_read_single); + + rreq->buffer.iter = *iter; + netfs_single_dispatch_read(rreq); + + ret = netfs_wait_for_read(rreq); + netfs_put_request(rreq, true, netfs_rreq_trace_put_return); + return ret; + +cleanup_free: + netfs_put_request(rreq, false, netfs_rreq_trace_put_failed); + return ret; +} +EXPORT_SYMBOL(netfs_read_single); diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c new file mode 100644 index 000000000000..75d97af14b4a --- /dev/null +++ b/fs/netfs/rolling_buffer.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Rolling buffer helpers + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/bitops.h> +#include <linux/pagemap.h> +#include <linux/rolling_buffer.h> +#include <linux/slab.h> +#include "internal.h" + +static atomic_t debug_ids; + +/** + * netfs_folioq_alloc - Allocate a folio_queue struct + * @rreq_id: Associated debugging ID for tracing purposes + * @gfp: Allocation constraints + * @trace: Trace tag to indicate the purpose of the allocation + * + * Allocate, initialise and account the folio_queue struct and log a trace line + * to mark the allocation. + */ +struct folio_queue *netfs_folioq_alloc(unsigned int rreq_id, gfp_t gfp, + unsigned int /*enum netfs_folioq_trace*/ trace) +{ + struct folio_queue *fq; + + fq = kmalloc(sizeof(*fq), gfp); + if (fq) { + netfs_stat(&netfs_n_folioq); + folioq_init(fq, rreq_id); + fq->debug_id = atomic_inc_return(&debug_ids); + trace_netfs_folioq(fq, trace); + } + return fq; +} +EXPORT_SYMBOL(netfs_folioq_alloc); + +/** + * netfs_folioq_free - Free a folio_queue struct + * @folioq: The object to free + * @trace: Trace tag to indicate which free + * + * Free and unaccount the folio_queue struct. + */ +void netfs_folioq_free(struct folio_queue *folioq, + unsigned int /*enum netfs_trace_folioq*/ trace) +{ + trace_netfs_folioq(folioq, trace); + netfs_stat_d(&netfs_n_folioq); + kfree(folioq); +} +EXPORT_SYMBOL(netfs_folioq_free); + +/* + * Initialise a rolling buffer. We allocate an empty folio queue struct to so + * that the pointers can be independently driven by the producer and the + * consumer. + */ +int rolling_buffer_init(struct rolling_buffer *roll, unsigned int rreq_id, + unsigned int direction) +{ + struct folio_queue *fq; + + fq = netfs_folioq_alloc(rreq_id, GFP_NOFS, netfs_trace_folioq_rollbuf_init); + if (!fq) + return -ENOMEM; + + roll->head = fq; + roll->tail = fq; + iov_iter_folio_queue(&roll->iter, direction, fq, 0, 0, 0); + return 0; +} + +/* + * Add another folio_queue to a rolling buffer if there's no space left. 
+ */ +int rolling_buffer_make_space(struct rolling_buffer *roll) +{ + struct folio_queue *fq, *head = roll->head; + + if (!folioq_full(head)) + return 0; + + fq = netfs_folioq_alloc(head->rreq_id, GFP_NOFS, netfs_trace_folioq_make_space); + if (!fq) + return -ENOMEM; + fq->prev = head; + + roll->head = fq; + if (folioq_full(head)) { + /* Make sure we don't leave the master iterator pointing to a + * block that might get immediately consumed. + */ + if (roll->iter.folioq == head && + roll->iter.folioq_slot == folioq_nr_slots(head)) { + roll->iter.folioq = fq; + roll->iter.folioq_slot = 0; + } + } + + /* Make sure the initialisation is stored before the next pointer. + * + * [!] NOTE: After we set head->next, the consumer is at liberty to + * immediately delete the old head. + */ + smp_store_release(&head->next, fq); + return 0; +} + +/* + * Decant the list of folios to read into a rolling buffer. + */ +ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll, + struct readahead_control *ractl, + struct folio_batch *put_batch) +{ + struct folio_queue *fq; + struct page **vec; + int nr, ix, to; + ssize_t size = 0; + + if (rolling_buffer_make_space(roll) < 0) + return -ENOMEM; + + fq = roll->head; + vec = (struct page **)fq->vec.folios; + nr = __readahead_batch(ractl, vec + folio_batch_count(&fq->vec), + folio_batch_space(&fq->vec)); + ix = fq->vec.nr; + to = ix + nr; + fq->vec.nr = to; + for (; ix < to; ix++) { + struct folio *folio = folioq_folio(fq, ix); + unsigned int order = folio_order(folio); + + fq->orders[ix] = order; + size += PAGE_SIZE << order; + trace_netfs_folio(folio, netfs_folio_trace_read); + if (!folio_batch_add(put_batch, folio)) + folio_batch_release(put_batch); + } + WRITE_ONCE(roll->iter.count, roll->iter.count + size); + + /* Store the counter after setting the slot. */ + smp_store_release(&roll->next_head_slot, to); + + for (; ix < folioq_nr_slots(fq); ix++) + folioq_clear(fq, ix); + + return size; +} + +/* + * Append a folio to the rolling buffer. + */ +ssize_t rolling_buffer_append(struct rolling_buffer *roll, struct folio *folio, + unsigned int flags) +{ + ssize_t size = folio_size(folio); + int slot; + + if (rolling_buffer_make_space(roll) < 0) + return -ENOMEM; + + slot = folioq_append(roll->head, folio); + if (flags & ROLLBUF_MARK_1) + folioq_mark(roll->head, slot); + if (flags & ROLLBUF_MARK_2) + folioq_mark2(roll->head, slot); + + WRITE_ONCE(roll->iter.count, roll->iter.count + size); + + /* Store the counter after setting the slot. */ + smp_store_release(&roll->next_head_slot, slot); + return size; +} + +/* + * Delete a spent buffer from a rolling queue and return the next in line. We + * don't return the last buffer to keep the pointers independent, but return + * NULL instead. + */ +struct folio_queue *rolling_buffer_delete_spent(struct rolling_buffer *roll) +{ + struct folio_queue *spent = roll->tail, *next = READ_ONCE(spent->next); + + if (!next) + return NULL; + next->prev = NULL; + netfs_folioq_free(spent, netfs_trace_folioq_delete); + roll->tail = next; + return next; +} + +/* + * Clear out a rolling queue. Folios that have mark 1 set are put. 
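(Illustrative aside, not part of the diff: a hedged producer/consumer sketch for the rolling-buffer helpers above; my_next_folio() is an assumed source of folios. Folios appended with ROLLBUF_MARK_1 are put again when the buffer is cleared.)

static int my_fill_buffer(struct rolling_buffer *roll, unsigned int id, int nr)
{
        int ret = rolling_buffer_init(roll, id, ITER_SOURCE);

        if (ret < 0)
                return ret;

        for (int i = 0; i < nr; i++) {
                struct folio *folio = my_next_folio(i); /* assumed helper */

                if (rolling_buffer_append(roll, folio, ROLLBUF_MARK_1) < 0) {
                        rolling_buffer_clear(roll);
                        return -ENOMEM;
                }
        }
        return 0;
}

static void my_drain_buffer(struct rolling_buffer *roll)
{
        /* Free folio_queue segments the consumer has finished with... */
        while (rolling_buffer_delete_spent(roll))
                ;
        /* ...then put the marked folios and free what is left. */
        rolling_buffer_clear(roll);
}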
+ */ +void rolling_buffer_clear(struct rolling_buffer *roll) +{ + struct folio_batch fbatch; + struct folio_queue *p; + + folio_batch_init(&fbatch); + + while ((p = roll->tail)) { + roll->tail = p->next; + for (int slot = 0; slot < folioq_count(p); slot++) { + struct folio *folio = folioq_folio(p, slot); + + if (!folio) + continue; + if (folioq_is_marked(p, slot)) { + trace_netfs_folio(folio, netfs_folio_trace_put); + if (!folio_batch_add(&fbatch, folio)) + folio_batch_release(&fbatch); + } + } + + netfs_folioq_free(p, netfs_trace_folioq_clear); + } + + folio_batch_release(&fbatch); +} diff --git a/fs/netfs/stats.c b/fs/netfs/stats.c index 8e63516b40f6..f1af344266cc 100644 --- a/fs/netfs/stats.c +++ b/fs/netfs/stats.c @@ -12,6 +12,7 @@ atomic_t netfs_n_rh_dio_read; atomic_t netfs_n_rh_readahead; atomic_t netfs_n_rh_read_folio; +atomic_t netfs_n_rh_read_single; atomic_t netfs_n_rh_rreq; atomic_t netfs_n_rh_sreq; atomic_t netfs_n_rh_download; @@ -46,10 +47,11 @@ atomic_t netfs_n_folioq; int netfs_stats_show(struct seq_file *m, void *v) { - seq_printf(m, "Reads : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n", + seq_printf(m, "Reads : DR=%u RA=%u RF=%u RS=%u WB=%u WBZ=%u\n", atomic_read(&netfs_n_rh_dio_read), atomic_read(&netfs_n_rh_readahead), atomic_read(&netfs_n_rh_read_folio), + atomic_read(&netfs_n_rh_read_single), atomic_read(&netfs_n_rh_write_begin), atomic_read(&netfs_n_rh_write_zskip)); seq_printf(m, "Writes : BW=%u WT=%u DW=%u WP=%u 2C=%u\n", diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c index ca3a11ed9b54..294f67795f79 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -17,10 +17,38 @@ #define HIT_PENDING 0x01 /* A front op was still pending */ #define NEED_REASSESS 0x02 /* Need to loop round and reassess */ #define MADE_PROGRESS 0x04 /* Made progress cleaning up a stream or the folio set */ -#define BUFFERED 0x08 /* The pagecache needs cleaning up */ +#define NEED_UNLOCK 0x08 /* The pagecache needs unlocking */ #define NEED_RETRY 0x10 /* A front op requests retrying */ #define SAW_FAILURE 0x20 /* One stream or hit a permanent failure */ +static void netfs_dump_request(const struct netfs_io_request *rreq) +{ + pr_err("Request R=%08x r=%d fl=%lx or=%x e=%ld\n", + rreq->debug_id, refcount_read(&rreq->ref), rreq->flags, + rreq->origin, rreq->error); + pr_err(" st=%llx tsl=%zx/%llx/%llx\n", + rreq->start, rreq->transferred, rreq->submitted, rreq->len); + pr_err(" cci=%llx/%llx/%llx\n", + rreq->cleaned_to, rreq->collected_to, atomic64_read(&rreq->issued_to)); + pr_err(" iw=%pSR\n", rreq->netfs_ops->issue_write); + for (int i = 0; i < NR_IO_STREAMS; i++) { + const struct netfs_io_subrequest *sreq; + const struct netfs_io_stream *s = &rreq->io_streams[i]; + + pr_err(" str[%x] s=%x e=%d acnf=%u,%u,%u,%u\n", + s->stream_nr, s->source, s->error, + s->avail, s->active, s->need_retry, s->failed); + pr_err(" str[%x] ct=%llx t=%zx\n", + s->stream_nr, s->collected_to, s->transferred); + list_for_each_entry(sreq, &s->subrequests, rreq_link) { + pr_err(" sreq[%x:%x] sc=%u s=%llx t=%zx/%zx r=%d f=%lx\n", + sreq->stream_nr, sreq->debug_index, sreq->source, + sreq->start, sreq->transferred, sreq->len, + refcount_read(&sreq->ref), sreq->flags); + } + } +} + /* * Successful completion of write of a folio to the server and/or cache. 
Note * that we are not allowed to lock the folio here on pain of deadlocking with @@ -83,9 +111,15 @@ end_wb: static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, unsigned int *notes) { - struct folio_queue *folioq = wreq->buffer; + struct folio_queue *folioq = wreq->buffer.tail; unsigned long long collected_to = wreq->collected_to; - unsigned int slot = wreq->buffer_head_slot; + unsigned int slot = wreq->buffer.first_tail_slot; + + if (WARN_ON_ONCE(!folioq)) { + pr_err("[!] Writeback unlock found empty rolling buffer!\n"); + netfs_dump_request(wreq); + return; + } if (wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) { if (netfs_pgpriv2_unlock_copied_folios(wreq)) @@ -94,7 +128,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, } if (slot >= folioq_nr_slots(folioq)) { - folioq = netfs_delete_buffer_head(wreq); + folioq = rolling_buffer_delete_spent(&wreq->buffer); + if (!folioq) + return; slot = 0; } @@ -134,9 +170,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, folioq_clear(folioq, slot); slot++; if (slot >= folioq_nr_slots(folioq)) { - if (READ_ONCE(wreq->buffer_tail) == folioq) - break; - folioq = netfs_delete_buffer_head(wreq); + folioq = rolling_buffer_delete_spent(&wreq->buffer); + if (!folioq) + goto done; slot = 0; } @@ -144,222 +180,9 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, break; } - wreq->buffer = folioq; - wreq->buffer_head_slot = slot; -} - -/* - * Perform retries on the streams that need it. - */ -static void netfs_retry_write_stream(struct netfs_io_request *wreq, - struct netfs_io_stream *stream) -{ - struct list_head *next; - - _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); - - if (list_empty(&stream->subrequests)) - return; - - if (stream->source == NETFS_UPLOAD_TO_SERVER && - wreq->netfs_ops->retry_request) - wreq->netfs_ops->retry_request(wreq, stream); - - if (unlikely(stream->failed)) - return; - - /* If there's no renegotiation to do, just resend each failed subreq. */ - if (!stream->prepare_write) { - struct netfs_io_subrequest *subreq; - - list_for_each_entry(subreq, &stream->subrequests, rreq_link) { - if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) - break; - if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { - struct iov_iter source = subreq->io_iter; - - iov_iter_revert(&source, subreq->len - source.count); - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); - netfs_reissue_write(stream, subreq, &source); - } - } - return; - } - - next = stream->subrequests.next; - - do { - struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp; - struct iov_iter source; - unsigned long long start, len; - size_t part; - bool boundary = false; - - /* Go through the stream and find the next span of contiguous - * data that we then rejig (cifs, for example, needs the wsize - * renegotiating) and reissue. 
- */ - from = list_entry(next, struct netfs_io_subrequest, rreq_link); - to = from; - start = from->start + from->transferred; - len = from->len - from->transferred; - - if (test_bit(NETFS_SREQ_FAILED, &from->flags) || - !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) - return; - - list_for_each_continue(next, &stream->subrequests) { - subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); - if (subreq->start + subreq->transferred != start + len || - test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || - !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) - break; - to = subreq; - len += to->len; - } - - /* Determine the set of buffers we're going to use. Each - * subreq gets a subset of a single overall contiguous buffer. - */ - netfs_reset_iter(from); - source = from->io_iter; - source.count = len; - - /* Work through the sublist. */ - subreq = from; - list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { - if (!len) - break; - /* Renegotiate max_len (wsize) */ - trace_netfs_sreq(subreq, netfs_sreq_trace_retry); - __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); - subreq->retry_count++; - stream->prepare_write(subreq); - - part = min(len, stream->sreq_max_len); - subreq->len = part; - subreq->start = start; - subreq->transferred = 0; - len -= part; - start += part; - if (len && subreq == to && - __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) - boundary = true; - - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); - netfs_reissue_write(stream, subreq, &source); - if (subreq == to) - break; - } - - /* If we managed to use fewer subreqs, we can discard the - * excess; if we used the same number, then we're done. - */ - if (!len) { - if (subreq == to) - continue; - list_for_each_entry_safe_from(subreq, tmp, - &stream->subrequests, rreq_link) { - trace_netfs_sreq(subreq, netfs_sreq_trace_discard); - list_del(&subreq->rreq_link); - netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); - if (subreq == to) - break; - } - continue; - } - - /* We ran out of subrequests, so we need to allocate some more - * and insert them after. - */ - do { - subreq = netfs_alloc_subrequest(wreq); - subreq->source = to->source; - subreq->start = start; - subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); - subreq->stream_nr = to->stream_nr; - subreq->retry_count = 1; - - trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, - refcount_read(&subreq->ref), - netfs_sreq_trace_new); - netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); - - list_add(&subreq->rreq_link, &to->rreq_link); - to = list_next_entry(to, rreq_link); - trace_netfs_sreq(subreq, netfs_sreq_trace_retry); - - stream->sreq_max_len = len; - stream->sreq_max_segs = INT_MAX; - switch (stream->source) { - case NETFS_UPLOAD_TO_SERVER: - netfs_stat(&netfs_n_wh_upload); - stream->sreq_max_len = umin(len, wreq->wsize); - break; - case NETFS_WRITE_TO_CACHE: - netfs_stat(&netfs_n_wh_write); - break; - default: - WARN_ON_ONCE(1); - } - - stream->prepare_write(subreq); - - part = umin(len, stream->sreq_max_len); - subreq->len = subreq->transferred + part; - len -= part; - start += part; - if (!len && boundary) { - __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); - boundary = false; - } - - netfs_reissue_write(stream, subreq, &source); - if (!len) - break; - - } while (len); - - } while (!list_is_head(next, &stream->subrequests)); -} - -/* - * Perform retries on the streams that need it. 
If we're doing content - * encryption and the server copy changed due to a third-party write, we may - * need to do an RMW cycle and also rewrite the data to the cache. - */ -static void netfs_retry_writes(struct netfs_io_request *wreq) -{ - struct netfs_io_subrequest *subreq; - struct netfs_io_stream *stream; - int s; - - /* Wait for all outstanding I/O to quiesce before performing retries as - * we may need to renegotiate the I/O sizes. - */ - for (s = 0; s < NR_IO_STREAMS; s++) { - stream = &wreq->io_streams[s]; - if (!stream->active) - continue; - - list_for_each_entry(subreq, &stream->subrequests, rreq_link) { - wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, - TASK_UNINTERRUPTIBLE); - } - } - - // TODO: Enc: Fetch changed partial pages - // TODO: Enc: Reencrypt content if needed. - // TODO: Enc: Wind back transferred point. - // TODO: Enc: Mark cache pages for retry. - - for (s = 0; s < NR_IO_STREAMS; s++) { - stream = &wreq->io_streams[s]; - if (stream->need_retry) { - stream->need_retry = false; - netfs_retry_write_stream(wreq, stream); - } - } + wreq->buffer.tail = folioq; +done: + wreq->buffer.first_tail_slot = slot; } /* @@ -390,7 +213,7 @@ reassess_streams: if (wreq->origin == NETFS_WRITEBACK || wreq->origin == NETFS_WRITETHROUGH || wreq->origin == NETFS_PGPRIV2_COPY_TO_CACHE) - notes = BUFFERED; + notes = NEED_UNLOCK; else notes = 0; @@ -449,14 +272,14 @@ reassess_streams: cancel: /* Remove if completely consumed. */ - spin_lock_bh(&wreq->lock); + spin_lock(&wreq->lock); remove = front; list_del_init(&front->rreq_link); front = list_first_entry_or_null(&stream->subrequests, struct netfs_io_subrequest, rreq_link); stream->front = front; - spin_unlock_bh(&wreq->lock); + spin_unlock(&wreq->lock); netfs_put_subrequest(remove, false, notes & SAW_FAILURE ? netfs_sreq_trace_put_cancel : @@ -487,7 +310,7 @@ reassess_streams: trace_netfs_collect_state(wreq, wreq->collected_to, notes); /* Unlock any folios that we have now finished with. 
*/ - if (notes & BUFFERED) { + if (notes & NEED_UNLOCK) { if (wreq->cleaned_to < wreq->collected_to) netfs_writeback_unlock_folios(wreq, ¬es); } else { @@ -500,7 +323,9 @@ reassess_streams: goto need_retry; if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { trace_netfs_rreq(wreq, netfs_rreq_trace_unpause); - clear_and_wake_up_bit(NETFS_RREQ_PAUSE, &wreq->flags); + clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags); + smp_mb__after_atomic(); /* Set PAUSE before task state */ + wake_up(&wreq->waitq); } if (notes & NEED_REASSESS) { diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c index ff0e82505a0b..69727411683e 100644 --- a/fs/netfs/write_issue.c +++ b/fs/netfs/write_issue.c @@ -94,9 +94,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, { struct netfs_io_request *wreq; struct netfs_inode *ictx; - bool is_buffered = (origin == NETFS_WRITEBACK || - origin == NETFS_WRITETHROUGH || - origin == NETFS_PGPRIV2_COPY_TO_CACHE); + bool is_cacheable = (origin == NETFS_WRITEBACK || + origin == NETFS_WRITEBACK_SINGLE || + origin == NETFS_WRITETHROUGH || + origin == NETFS_PGPRIV2_COPY_TO_CACHE); wreq = netfs_alloc_request(mapping, file, start, 0, origin); if (IS_ERR(wreq)) @@ -105,8 +106,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, _enter("R=%x", wreq->debug_id); ictx = netfs_inode(wreq->inode); - if (is_buffered && netfs_is_cache_enabled(ictx)) + if (is_cacheable && netfs_is_cache_enabled(ictx)) fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx)); + if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0) + goto nomem; wreq->cleaned_to = wreq->start; @@ -129,6 +132,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, } return wreq; +nomem: + wreq->error = -ENOMEM; + netfs_put_request(wreq, false, netfs_rreq_trace_put_failed); + return ERR_PTR(-ENOMEM); } /** @@ -153,16 +160,15 @@ static void netfs_prepare_write(struct netfs_io_request *wreq, loff_t start) { struct netfs_io_subrequest *subreq; - struct iov_iter *wreq_iter = &wreq->io_iter; + struct iov_iter *wreq_iter = &wreq->buffer.iter; /* Make sure we don't point the iterator at a used-up folio_queue * struct being used as a placeholder to prevent the queue from * collapsing. In such a case, extend the queue. */ if (iov_iter_is_folioq(wreq_iter) && - wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) { - netfs_buffer_make_space(wreq); - } + wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) + rolling_buffer_make_space(&wreq->buffer); subreq = netfs_alloc_subrequest(wreq); subreq->source = stream->source; @@ -198,7 +204,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq, * the list. The collector only goes nextwards and uses the lock to * remove entries off of the front. */ - spin_lock_bh(&wreq->lock); + spin_lock(&wreq->lock); list_add_tail(&subreq->rreq_link, &stream->subrequests); if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { stream->front = subreq; @@ -209,7 +215,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq, } } - spin_unlock_bh(&wreq->lock); + spin_unlock(&wreq->lock); stream->construct = subreq; } @@ -268,9 +274,9 @@ void netfs_issue_write(struct netfs_io_request *wreq, * we can avoid overrunning the credits obtained (cifs) and try to parallelise * content-crypto preparation with network writes. 
*/ -int netfs_advance_write(struct netfs_io_request *wreq, - struct netfs_io_stream *stream, - loff_t start, size_t len, bool to_eof) +size_t netfs_advance_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream, + loff_t start, size_t len, bool to_eof) { struct netfs_io_subrequest *subreq = stream->construct; size_t part; @@ -327,6 +333,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq, _enter(""); + if (rolling_buffer_make_space(&wreq->buffer) < 0) + return -ENOMEM; + /* netfs_perform_write() may shift i_size around the page or from out * of the page to beyond it, but cannot move i_size into or through the * page since we have it locked. @@ -431,7 +440,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, } /* Attach the folio to the rolling buffer. */ - netfs_buffer_append_folio(wreq, folio, false); + rolling_buffer_append(&wreq->buffer, folio, 0); /* Move the submission point forward to allow for write-streaming data * not starting at the front of the page. We don't do write-streaming @@ -444,7 +453,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq, stream = &wreq->io_streams[s]; stream->submit_off = foff; stream->submit_len = flen; - if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) || + if (!stream->avail || + (stream->source == NETFS_WRITE_TO_CACHE && streamw) || (stream->source == NETFS_UPLOAD_TO_SERVER && fgroup == NETFS_FOLIO_COPY_TO_CACHE)) { stream->submit_off = UINT_MAX; @@ -478,7 +488,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, /* Advance the iterator(s). */ if (stream->submit_off > iter_off) { - iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off); + rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); iter_off = stream->submit_off; } @@ -496,7 +506,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq, } if (fsize > iter_off) - iov_iter_advance(&wreq->io_iter, fsize - iter_off); + rolling_buffer_advance(&wreq->buffer, fsize - iter_off); atomic64_set(&wreq->issued_to, fpos + fsize); if (!debug) @@ -635,7 +645,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c struct folio **writethrough_cache) { _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u", - wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end); + wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end); if (!*writethrough_cache) { if (folio_test_dirty(folio)) @@ -710,10 +720,10 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t part = netfs_advance_write(wreq, upload, start, len, false); start += part; len -= part; - iov_iter_advance(&wreq->io_iter, part); + rolling_buffer_advance(&wreq->buffer, part); if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause); - wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE); + wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags)); } if (test_bit(NETFS_RREQ_FAILED, &wreq->flags)) break; @@ -723,3 +733,194 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t _leave(" = %d", error); return error; } + +/* + * Write some of a pending folio data back to the server and/or the cache. 
+ */ +static int netfs_write_folio_single(struct netfs_io_request *wreq, + struct folio *folio) +{ + struct netfs_io_stream *upload = &wreq->io_streams[0]; + struct netfs_io_stream *cache = &wreq->io_streams[1]; + struct netfs_io_stream *stream; + size_t iter_off = 0; + size_t fsize = folio_size(folio), flen; + loff_t fpos = folio_pos(folio); + bool to_eof = false; + bool no_debug = false; + + _enter(""); + + flen = folio_size(folio); + if (flen > wreq->i_size - fpos) { + flen = wreq->i_size - fpos; + folio_zero_segment(folio, flen, fsize); + to_eof = true; + } else if (flen == wreq->i_size - fpos) { + to_eof = true; + } + + _debug("folio %zx/%zx", flen, fsize); + + if (!upload->avail && !cache->avail) { + trace_netfs_folio(folio, netfs_folio_trace_cancel_store); + return 0; + } + + if (!upload->construct) + trace_netfs_folio(folio, netfs_folio_trace_store); + else + trace_netfs_folio(folio, netfs_folio_trace_store_plus); + + /* Attach the folio to the rolling buffer. */ + folio_get(folio); + rolling_buffer_append(&wreq->buffer, folio, NETFS_ROLLBUF_PUT_MARK); + + /* Move the submission point forward to allow for write-streaming data + * not starting at the front of the page. We don't do write-streaming + * with the cache as the cache requires DIO alignment. + * + * Also skip uploading for data that's been read and just needs copying + * to the cache. + */ + for (int s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + stream->submit_off = 0; + stream->submit_len = flen; + if (!stream->avail) { + stream->submit_off = UINT_MAX; + stream->submit_len = 0; + } + } + + /* Attach the folio to one or more subrequests. For a big folio, we + * could end up with thousands of subrequests if the wsize is small - + * but we might need to wait during the creation of subrequests for + * network resources (eg. SMB credits). + */ + for (;;) { + ssize_t part; + size_t lowest_off = ULONG_MAX; + int choose_s = -1; + + /* Always add to the lowest-submitted stream first. */ + for (int s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->submit_len > 0 && + stream->submit_off < lowest_off) { + lowest_off = stream->submit_off; + choose_s = s; + } + } + + if (choose_s < 0) + break; + stream = &wreq->io_streams[choose_s]; + + /* Advance the iterator(s). */ + if (stream->submit_off > iter_off) { + rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off); + iter_off = stream->submit_off; + } + + atomic64_set(&wreq->issued_to, fpos + stream->submit_off); + stream->submit_extendable_to = fsize - stream->submit_off; + part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, + stream->submit_len, to_eof); + stream->submit_off += part; + if (part > stream->submit_len) + stream->submit_len = 0; + else + stream->submit_len -= part; + if (part > 0) + no_debug = true; + } + + wreq->buffer.iter.iov_offset = 0; + if (fsize > iter_off) + rolling_buffer_advance(&wreq->buffer, fsize - iter_off); + atomic64_set(&wreq->issued_to, fpos + fsize); + + if (!no_debug) + kdebug("R=%x: No submit", wreq->debug_id); + _leave(" = 0"); + return 0; +} + +/** + * netfs_writeback_single - Write back a monolithic payload + * @mapping: The mapping to write from + * @wbc: Hints from the VM + * @iter: Data to write, must be ITER_FOLIOQ. + * + * Write a monolithic, non-pagecache object back to the server and/or + * the cache. 
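(Illustrative aside, not part of the diff: a hedged sketch of how a filesystem might drive this single-object write path — mark the inode dirty when the in-memory object changes, then have its ->writepages() hand the object's folio_queue to netfs_writeback_single(). my_inode_to_fq() and my_obj_size() are assumptions.)

static void my_object_changed(struct inode *inode)
{
        /* Ensure ->writepages() gets called for the monolithic object. */
        netfs_single_mark_inode_dirty(inode);
}

static int my_writepages(struct address_space *mapping,
                         struct writeback_control *wbc)
{
        struct iov_iter iter;

        /* The iterator handed over must be ITER_FOLIOQ. */
        iov_iter_folio_queue(&iter, ITER_SOURCE, my_inode_to_fq(mapping->host),
                             0, 0, my_obj_size(mapping->host));
        return netfs_writeback_single(mapping, wbc, &iter);
}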
+ */ +int netfs_writeback_single(struct address_space *mapping, + struct writeback_control *wbc, + struct iov_iter *iter) +{ + struct netfs_io_request *wreq; + struct netfs_inode *ictx = netfs_inode(mapping->host); + struct folio_queue *fq; + size_t size = iov_iter_count(iter); + int ret; + + if (WARN_ON_ONCE(!iov_iter_is_folioq(iter))) + return -EIO; + + if (!mutex_trylock(&ictx->wb_lock)) { + if (wbc->sync_mode == WB_SYNC_NONE) { + netfs_stat(&netfs_n_wb_lock_skip); + return 0; + } + netfs_stat(&netfs_n_wb_lock_wait); + mutex_lock(&ictx->wb_lock); + } + + wreq = netfs_create_write_req(mapping, NULL, 0, NETFS_WRITEBACK_SINGLE); + if (IS_ERR(wreq)) { + ret = PTR_ERR(wreq); + goto couldnt_start; + } + + trace_netfs_write(wreq, netfs_write_trace_writeback); + netfs_stat(&netfs_n_wh_writepages); + + if (__test_and_set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags)) + wreq->netfs_ops->begin_writeback(wreq); + + for (fq = (struct folio_queue *)iter->folioq; fq; fq = fq->next) { + for (int slot = 0; slot < folioq_count(fq); slot++) { + struct folio *folio = folioq_folio(fq, slot); + size_t part = umin(folioq_folio_size(fq, slot), size); + + _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to)); + + ret = netfs_write_folio_single(wreq, folio); + if (ret < 0) + goto stop; + size -= part; + if (size <= 0) + goto stop; + } + } + +stop: + for (int s = 0; s < NR_IO_STREAMS; s++) + netfs_issue_write(wreq, &wreq->io_streams[s]); + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); + + mutex_unlock(&ictx->wb_lock); + + netfs_put_request(wreq, false, netfs_rreq_trace_put_return); + _leave(" = %d", ret); + return ret; + +couldnt_start: + mutex_unlock(&ictx->wb_lock); + _leave(" = %d", ret); + return ret; +} +EXPORT_SYMBOL(netfs_writeback_single); diff --git a/fs/netfs/write_retry.c b/fs/netfs/write_retry.c new file mode 100644 index 000000000000..c841a851dd73 --- /dev/null +++ b/fs/netfs/write_retry.c @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Network filesystem write retrying. + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/slab.h> +#include "internal.h" + +/* + * Perform retries on the streams that need it. + */ +static void netfs_retry_write_stream(struct netfs_io_request *wreq, + struct netfs_io_stream *stream) +{ + struct list_head *next; + + _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); + + if (list_empty(&stream->subrequests)) + return; + + if (stream->source == NETFS_UPLOAD_TO_SERVER && + wreq->netfs_ops->retry_request) + wreq->netfs_ops->retry_request(wreq, stream); + + if (unlikely(stream->failed)) + return; + + /* If there's no renegotiation to do, just resend each failed subreq. 
*/ + if (!stream->prepare_write) { + struct netfs_io_subrequest *subreq; + + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { + if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) + break; + if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { + struct iov_iter source = subreq->io_iter; + + iov_iter_revert(&source, subreq->len - source.count); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + netfs_reissue_write(stream, subreq, &source); + } + } + return; + } + + next = stream->subrequests.next; + + do { + struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp; + struct iov_iter source; + unsigned long long start, len; + size_t part; + bool boundary = false; + + /* Go through the stream and find the next span of contiguous + * data that we then rejig (cifs, for example, needs the wsize + * renegotiating) and reissue. + */ + from = list_entry(next, struct netfs_io_subrequest, rreq_link); + to = from; + start = from->start + from->transferred; + len = from->len - from->transferred; + + if (test_bit(NETFS_SREQ_FAILED, &from->flags) || + !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) + return; + + list_for_each_continue(next, &stream->subrequests) { + subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); + if (subreq->start + subreq->transferred != start + len || + test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || + !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) + break; + to = subreq; + len += to->len; + } + + /* Determine the set of buffers we're going to use. Each + * subreq gets a subset of a single overall contiguous buffer. + */ + netfs_reset_iter(from); + source = from->io_iter; + source.count = len; + + /* Work through the sublist. */ + subreq = from; + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { + if (!len) + break; + + subreq->start = start; + subreq->len = len; + __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + subreq->retry_count++; + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + + /* Renegotiate max_len (wsize) */ + stream->sreq_max_len = len; + stream->prepare_write(subreq); + + part = umin(len, stream->sreq_max_len); + if (unlikely(stream->sreq_max_segs)) + part = netfs_limit_iter(&source, 0, part, stream->sreq_max_segs); + subreq->len = part; + subreq->transferred = 0; + len -= part; + start += part; + if (len && subreq == to && + __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) + boundary = true; + + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + netfs_reissue_write(stream, subreq, &source); + if (subreq == to) + break; + } + + /* If we managed to use fewer subreqs, we can discard the + * excess; if we used the same number, then we're done. + */ + if (!len) { + if (subreq == to) + continue; + list_for_each_entry_safe_from(subreq, tmp, + &stream->subrequests, rreq_link) { + trace_netfs_sreq(subreq, netfs_sreq_trace_discard); + list_del(&subreq->rreq_link); + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); + if (subreq == to) + break; + } + continue; + } + + /* We ran out of subrequests, so we need to allocate some more + * and insert them after. 
+ */ + do { + subreq = netfs_alloc_subrequest(wreq); + subreq->source = to->source; + subreq->start = start; + subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); + subreq->stream_nr = to->stream_nr; + subreq->retry_count = 1; + + trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, + refcount_read(&subreq->ref), + netfs_sreq_trace_new); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + + list_add(&subreq->rreq_link, &to->rreq_link); + to = list_next_entry(to, rreq_link); + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + + stream->sreq_max_len = len; + stream->sreq_max_segs = INT_MAX; + switch (stream->source) { + case NETFS_UPLOAD_TO_SERVER: + netfs_stat(&netfs_n_wh_upload); + stream->sreq_max_len = umin(len, wreq->wsize); + break; + case NETFS_WRITE_TO_CACHE: + netfs_stat(&netfs_n_wh_write); + break; + default: + WARN_ON_ONCE(1); + } + + stream->prepare_write(subreq); + + part = umin(len, stream->sreq_max_len); + subreq->len = subreq->transferred + part; + len -= part; + start += part; + if (!len && boundary) { + __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); + boundary = false; + } + + netfs_reissue_write(stream, subreq, &source); + if (!len) + break; + + } while (len); + + } while (!list_is_head(next, &stream->subrequests)); +} + +/* + * Perform retries on the streams that need it. If we're doing content + * encryption and the server copy changed due to a third-party write, we may + * need to do an RMW cycle and also rewrite the data to the cache. + */ +void netfs_retry_writes(struct netfs_io_request *wreq) +{ + struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream; + int s; + + /* Wait for all outstanding I/O to quiesce before performing retries as + * we may need to renegotiate the I/O sizes. + */ + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (!stream->active) + continue; + + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { + wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, + TASK_UNINTERRUPTIBLE); + } + } + + // TODO: Enc: Fetch changed partial pages + // TODO: Enc: Reencrypt content if needed. + // TODO: Enc: Wind back transferred point. + // TODO: Enc: Mark cache pages for retry. 
+ + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->need_retry) { + stream->need_retry = false; + netfs_retry_write_stream(wreq, stream); + } + } +} diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 0eb20012792f..d3f76101ad4b 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -170,7 +170,8 @@ config ROOT_NFS config NFS_FSCACHE bool "Provide NFS client caching support" - depends on NFS_FS=m && NETFS_SUPPORT || NFS_FS=y && NETFS_SUPPORT=y + depends on NFS_FS + select NETFS_SUPPORT select FSCACHE help Say Y here if you want NFS data to be cached locally on disc through diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 6cf92498a5ac..86bdc7d23fb9 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -211,10 +211,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion) return ERR_PTR(-ENOMEM); } cb_info->serv = serv; - /* As there is only one thread we need to over-ride the - * default maximum of 80 connections - */ - serv->sv_maxconn = 1024; dprintk("nfs_callback_create_svc: service created\n"); return serv; } diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 7832fb0369a1..8397c43358bd 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -718,7 +718,7 @@ __be32 nfs4_callback_offload(void *data, void *dummy, copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL); if (!copy) - return htonl(NFS4ERR_SERVERFAULT); + return cpu_to_be32(NFS4ERR_DELAY); spin_lock(&cps->clp->cl_lock); rcu_read_lock(); diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index fdeb0b34a3d3..4254ba3ee7c5 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -984,6 +984,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp) nfs_put_client(cps.clp); goto out_invalidcred; } + svc_xprt_set_valid(rqstp->rq_xprt); } cps.minorversion = hdr_arg.minorversion; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 550ca934c9cf..3b0918ade53c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -38,7 +38,7 @@ #include <linux/sunrpc/bc_xprt.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> - +#include <linux/nfslocalio.h> #include "nfs4_fs.h" #include "callback.h" @@ -186,7 +186,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) seqlock_init(&clp->cl_boot_lock); ktime_get_real_ts64(&clp->cl_nfssvc_boot); nfs_uuid_init(&clp->cl_uuid); - spin_lock_init(&clp->cl_localio_lock); + INIT_WORK(&clp->cl_local_probe_work, nfs_local_probe_async_work); #endif /* CONFIG_NFS_LOCALIO */ clp->cl_principal = "*"; @@ -244,7 +244,7 @@ static void pnfs_init_server(struct nfs_server *server) */ void nfs_free_client(struct nfs_client *clp) { - nfs_local_disable(clp); + nfs_localio_disable_client(clp); /* -EIO all pending I/O */ if (!IS_ERR(clp->cl_rpcclient)) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 492cffd9d3d8..2b04038b0e40 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1672,7 +1672,7 @@ nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry, return nfs_lookup_revalidate_done(dir, dentry, inode, 1); } -static int nfs_lookup_revalidate_dentry(struct inode *dir, +static int nfs_lookup_revalidate_dentry(struct inode *dir, const struct qstr *name, struct dentry *dentry, struct inode *inode, unsigned int flags) { @@ -1690,7 +1690,7 @@ static int nfs_lookup_revalidate_dentry(struct inode *dir, goto out; dir_verifier = nfs_save_change_attribute(dir); - ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr); + ret = NFS_PROTO(dir)->lookup(dir, dentry, name, 
fhandle, fattr); if (ret < 0) goto out; @@ -1732,8 +1732,8 @@ out: * cached dentry and do a new lookup. */ static int -nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, - unsigned int flags) +nfs_do_lookup_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct inode *inode; int error = 0; @@ -1775,7 +1775,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, if (NFS_STALE(inode)) goto out_bad; - return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags); + return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags); out_valid: return nfs_lookup_revalidate_done(dir, dentry, inode, 1); out_bad: @@ -1785,38 +1785,26 @@ out_bad: } static int -__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags, - int (*reval)(struct inode *, struct dentry *, unsigned int)) +__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) { - struct dentry *parent; - struct inode *dir; - int ret; - if (flags & LOOKUP_RCU) { if (dentry->d_fsdata == NFS_FSDATA_BLOCKED) return -ECHILD; - parent = READ_ONCE(dentry->d_parent); - dir = d_inode_rcu(parent); - if (!dir) - return -ECHILD; - ret = reval(dir, dentry, flags); - if (parent != READ_ONCE(dentry->d_parent)) - return -ECHILD; } else { /* Wait for unlink to complete - see unblock_revalidate() */ wait_var_event(&dentry->d_fsdata, smp_load_acquire(&dentry->d_fsdata) != NFS_FSDATA_BLOCKED); - parent = dget_parent(dentry); - ret = reval(d_inode(parent), dentry, flags); - dput(parent); } - return ret; + return 0; } -static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags) +static int nfs_lookup_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate); + if (__nfs_lookup_revalidate(dentry, flags)) + return -ECHILD; + return nfs_do_lookup_revalidate(dir, name, dentry, flags); } static void block_revalidate(struct dentry *dentry) @@ -1982,7 +1970,8 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in dir_verifier = nfs_save_change_attribute(dir); trace_nfs_lookup_enter(dir, dentry, flags); - error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr); + error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name, + fhandle, fattr); if (error == -ENOENT) { if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) dir_verifier = inode_peek_iversion_raw(dir); @@ -2025,7 +2014,8 @@ void nfs_d_prune_case_insensitive_aliases(struct inode *inode) EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases); #if IS_ENABLED(CONFIG_NFS_V4) -static int nfs4_lookup_revalidate(struct dentry *, unsigned int); +static int nfs4_lookup_revalidate(struct inode *, const struct qstr *, + struct dentry *, unsigned int); const struct dentry_operations nfs4_dentry_operations = { .d_revalidate = nfs4_lookup_revalidate, @@ -2214,11 +2204,14 @@ no_open: EXPORT_SYMBOL_GPL(nfs_atomic_open); static int -nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, - unsigned int flags) +nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct inode *inode; + if (__nfs_lookup_revalidate(dentry, flags)) + return -ECHILD; + trace_nfs_lookup_revalidate_enter(dir, dentry, flags); if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY)) @@ -2254,16 +2247,10 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry, reval_dentry: if (flags & 
LOOKUP_RCU) return -ECHILD; - return nfs_lookup_revalidate_dentry(dir, dentry, inode, flags); + return nfs_lookup_revalidate_dentry(dir, name, dentry, inode, flags); full_reval: - return nfs_do_lookup_revalidate(dir, dentry, flags); -} - -static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags) -{ - return __nfs_lookup_revalidate(dentry, flags, - nfs4_do_lookup_revalidate); + return nfs_do_lookup_revalidate(dir, name, dentry, flags); } #endif /* CONFIG_NFSV4 */ @@ -2319,7 +2306,8 @@ nfs_add_or_obtain(struct dentry *dentry, struct nfs_fh *fhandle, d_drop(dentry); if (fhandle->size == 0) { - error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr); + error = NFS_PROTO(dir)->lookup(dir, dentry, &dentry->d_name, + fhandle, fattr); if (error) goto out_error; } diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index b08dbe96bc57..f45beea92d03 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -303,6 +303,7 @@ static void nfs_read_sync_pgio_error(struct list_head *head, int error) static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr) { get_dreq(hdr->dreq); + set_bit(NFS_IOHDR_ODIRECT, &hdr->flags); } static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = { diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index f78115c6c2c1..98b45b636be3 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -164,18 +164,17 @@ decode_name(struct xdr_stream *xdr, u32 *id) } static struct nfsd_file * -ff_local_open_fh(struct nfs_client *clp, const struct cred *cred, +ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx, + struct nfs_client *clp, const struct cred *cred, struct nfs_fh *fh, fmode_t mode) { - if (mode & FMODE_WRITE) { - /* - * Always request read and write access since this corresponds - * to a rw layout. 
- */ - mode |= FMODE_READ; - } +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx); - return nfs_local_open_fh(clp, cred, fh, mode); + return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode); +#else + return NULL; +#endif } static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1, @@ -247,6 +246,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags) spin_lock_init(&mirror->lock); refcount_set(&mirror->ref, 1); INIT_LIST_HEAD(&mirror->mirrors); + nfs_localio_file_init(&mirror->nfl); } return mirror; } @@ -257,6 +257,7 @@ static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror) ff_layout_remove_mirror(mirror); kfree(mirror->fh_versions); + nfs_close_local_fh(&mirror->nfl); cred = rcu_access_pointer(mirror->ro_cred); put_cred(cred); cred = rcu_access_pointer(mirror->rw_cred); @@ -847,6 +848,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs4_pnfs_ds *ds; u32 ds_idx; + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; retry: pnfs_generic_pg_check_layout(pgio, req); /* Use full layout for now */ @@ -860,6 +864,8 @@ retry: if (!pgio->pg_lseg) goto out_nolseg; } + /* Reset wb_nio, since getting layout segment was successful */ + req->wb_nio = 0; ds = ff_layout_get_ds_for_read(pgio, &ds_idx); if (!ds) { @@ -876,14 +882,24 @@ retry: pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; pgio->pg_mirror_idx = ds_idx; - - if (NFS_SERVER(pgio->pg_inode)->flags & - (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) - pgio->pg_maxretrans = io_maxretrans; return; out_nolseg: - if (pgio->pg_error < 0) - return; + if (pgio->pg_error < 0) { + if (pgio->pg_error != -EAGAIN) + return; + /* Retry getting layout segment if lower layer returned -EAGAIN */ + if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) { + if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR) + pgio->pg_error = -ETIMEDOUT; + else + pgio->pg_error = -EIO; + return; + } + pgio->pg_error = 0; + /* Sleep for 1 second before retrying */ + ssleep(1); + goto retry; + } out_mds: trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_READ, @@ -1820,7 +1836,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr) hdr->mds_offset = offset; /* Start IO accounting for local read */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, FMODE_READ); + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ); if (localio) { hdr->task.tk_start = ktime_get(); ff_layout_read_record_layoutstats_start(&hdr->task, hdr); @@ -1896,7 +1912,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) hdr->args.offset = offset; /* Start IO accounting for local write */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ|FMODE_WRITE); if (localio) { hdr->task.tk_start = ktime_get(); @@ -1981,7 +1997,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) data->args.fh = fh; /* Start IO accounting for local commit */ - localio = ff_local_open_fh(ds->ds_clp, ds_cred, fh, + localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ|FMODE_WRITE); if (localio) { data->task.tk_start = ktime_get(); diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h index f84b3fb0dddd..095df09017a5 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.h +++ 
b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -83,6 +83,7 @@ struct nfs4_ff_layout_mirror { nfs4_stateid stateid; const struct cred __rcu *ro_cred; const struct cred __rcu *rw_cred; + struct nfs_file_localio nfl; refcount_t ref; spinlock_t lock; unsigned long flags; diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index d49e4ce27999..e278a1ad1ca3 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -314,8 +314,10 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq) &nfs_async_read_completion_ops); netfs = nfs_netfs_alloc(sreq); - if (!netfs) - return netfs_read_subreq_terminated(sreq, -ENOMEM, false); + if (!netfs) { + sreq->error = -ENOMEM; + return netfs_read_subreq_terminated(sreq); + } pgio.pg_netfs = netfs; /* used in completion */ diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 772d485e96d3..9d86868f4998 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -74,7 +74,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs) */ netfs->sreq->transferred = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred)); - netfs_read_subreq_terminated(netfs->sreq, netfs->error, false); + netfs->sreq->error = netfs->error; + netfs_read_subreq_terminated(netfs->sreq); kfree(netfs); } static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 596f35170137..1aa67fca69b2 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1137,6 +1137,8 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, ctx->lock_context.open_context = ctx; INIT_LIST_HEAD(&ctx->list); ctx->mdsthreshold = NULL; + nfs_localio_file_init(&ctx->nfl); + return ctx; } EXPORT_SYMBOL_GPL(alloc_nfs_open_context); @@ -1168,6 +1170,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync) nfs_sb_deactive(sb); put_rpccred(rcu_dereference_protected(ctx->ll_cred, 1)); kfree(ctx->mdsthreshold); + nfs_close_local_fh(&ctx->nfl); kfree_rcu(ctx, rcu_head); } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e564bd11ba60..fae2c7ae4acc 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -455,11 +455,13 @@ extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); #if IS_ENABLED(CONFIG_NFS_LOCALIO) /* localio.c */ -extern void nfs_local_disable(struct nfs_client *); extern void nfs_local_probe(struct nfs_client *); +extern void nfs_local_probe_async(struct nfs_client *); +extern void nfs_local_probe_async_work(struct work_struct *); extern struct nfsd_file *nfs_local_open_fh(struct nfs_client *, const struct cred *, struct nfs_fh *, + struct nfs_file_localio *, const fmode_t); extern int nfs_local_doio(struct nfs_client *, struct nfsd_file *, @@ -471,11 +473,12 @@ extern int nfs_local_commit(struct nfsd_file *, extern bool nfs_server_is_local(const struct nfs_client *clp); #else /* CONFIG_NFS_LOCALIO */ -static inline void nfs_local_disable(struct nfs_client *clp) {} static inline void nfs_local_probe(struct nfs_client *clp) {} +static inline void nfs_local_probe_async(struct nfs_client *clp) {} static inline struct nfsd_file * nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, - struct nfs_fh *fh, const fmode_t mode) + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) { return NULL; } diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c index 4b8618cf114c..5c21caeae075 100644 --- a/fs/nfs/localio.c +++ b/fs/nfs/localio.c @@ -35,6 +35,7 @@ struct nfs_local_kiocb { struct bio_vec *bvec; struct nfs_pgio_header *hdr; struct work_struct work; + void 
(*aio_complete_work)(struct work_struct *); struct nfsd_file *localio; }; @@ -48,9 +49,14 @@ struct nfs_local_fsync_ctx { static bool localio_enabled __read_mostly = true; module_param(localio_enabled, bool, 0644); +static bool localio_O_DIRECT_semantics __read_mostly = false; +module_param(localio_O_DIRECT_semantics, bool, 0644); +MODULE_PARM_DESC(localio_O_DIRECT_semantics, + "LOCALIO will use O_DIRECT semantics to filesystem."); + static inline bool nfs_client_is_local(const struct nfs_client *clp) { - return !!test_bit(NFS_CS_LOCAL_IO, &clp->cl_flags); + return !!rcu_access_pointer(clp->cl_uuid.net); } bool nfs_server_is_local(const struct nfs_client *clp) @@ -116,30 +122,6 @@ const struct rpc_program nfslocalio_program = { }; /* - * nfs_local_enable - enable local i/o for an nfs_client - */ -static void nfs_local_enable(struct nfs_client *clp) -{ - spin_lock(&clp->cl_localio_lock); - set_bit(NFS_CS_LOCAL_IO, &clp->cl_flags); - trace_nfs_local_enable(clp); - spin_unlock(&clp->cl_localio_lock); -} - -/* - * nfs_local_disable - disable local i/o for an nfs_client - */ -void nfs_local_disable(struct nfs_client *clp) -{ - spin_lock(&clp->cl_localio_lock); - if (test_and_clear_bit(NFS_CS_LOCAL_IO, &clp->cl_flags)) { - trace_nfs_local_disable(clp); - nfs_uuid_invalidate_one_client(&clp->cl_uuid); - } - spin_unlock(&clp->cl_localio_lock); -} - -/* * nfs_init_localioclient - Initialise an NFS localio client connection */ static struct rpc_clnt *nfs_init_localioclient(struct nfs_client *clp) @@ -178,7 +160,7 @@ static bool nfs_server_uuid_is_local(struct nfs_client *clp) rpc_shutdown_client(rpcclient_localio); /* Server is only local if it initialized required struct members */ - if (status || !clp->cl_uuid.net || !clp->cl_uuid.dom) + if (status || !rcu_access_pointer(clp->cl_uuid.net) || !clp->cl_uuid.dom) return false; return true; @@ -194,44 +176,64 @@ void nfs_local_probe(struct nfs_client *clp) /* Disallow localio if disabled via sysfs or AUTH_SYS isn't used */ if (!localio_enabled || clp->cl_rpcclient->cl_auth->au_flavor != RPC_AUTH_UNIX) { - nfs_local_disable(clp); + nfs_localio_disable_client(clp); return; } if (nfs_client_is_local(clp)) { /* If already enabled, disable and re-enable */ - nfs_local_disable(clp); + nfs_localio_disable_client(clp); } if (!nfs_uuid_begin(&clp->cl_uuid)) return; if (nfs_server_uuid_is_local(clp)) - nfs_local_enable(clp); + nfs_localio_enable_client(clp); nfs_uuid_end(&clp->cl_uuid); } EXPORT_SYMBOL_GPL(nfs_local_probe); +void nfs_local_probe_async_work(struct work_struct *work) +{ + struct nfs_client *clp = + container_of(work, struct nfs_client, cl_local_probe_work); + + nfs_local_probe(clp); +} + +void nfs_local_probe_async(struct nfs_client *clp) +{ + queue_work(nfsiod_workqueue, &clp->cl_local_probe_work); +} +EXPORT_SYMBOL_GPL(nfs_local_probe_async); + +static inline struct nfsd_file *nfs_local_file_get(struct nfsd_file *nf) +{ + return nfs_to->nfsd_file_get(nf); +} + +static inline void nfs_local_file_put(struct nfsd_file *nf) +{ + nfs_to->nfsd_file_put(nf); +} + /* - * nfs_local_open_fh - open a local filehandle in terms of nfsd_file + * __nfs_local_open_fh - open a local filehandle in terms of nfsd_file. * - * Returns a pointer to a struct nfsd_file or NULL + * Returns a pointer to a struct nfsd_file or ERR_PTR. + * Caller must release returned nfsd_file with nfs_to_nfsd_file_put_local(). 
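(Illustrative aside, not part of the diff: a hedged sketch of the caching pattern used here. A caller embeds a struct nfs_file_localio in its per-object state, as the patch does for nfs_open_context and the ff_layout mirrors, so that the nfsd_file handed back by nfs_local_open_fh() below is cached across calls; struct my_obj and its helpers are assumptions.)

struct my_obj {
        struct nfs_fh fh;
        struct nfs_file_localio nfl;
};

static void my_obj_init(struct my_obj *obj)
{
        nfs_localio_file_init(&obj->nfl);
}

static struct nfsd_file *my_obj_open_local(struct nfs_client *clp,
                                           const struct cred *cred,
                                           struct my_obj *obj, fmode_t mode)
{
        /* NULL means fall back to normal RPC I/O. */
        return nfs_local_open_fh(clp, cred, &obj->fh, &obj->nfl, mode);
}

static void my_obj_release(struct my_obj *obj)
{
        /* Drops any ro/rw nfsd_files cached in obj->nfl. */
        nfs_close_local_fh(&obj->nfl);
}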
*/ -struct nfsd_file * -nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, - struct nfs_fh *fh, const fmode_t mode) +static struct nfsd_file * +__nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) { struct nfsd_file *localio; - int status; - - if (!nfs_server_is_local(clp)) - return NULL; - if (mode & ~(FMODE_READ | FMODE_WRITE)) - return NULL; localio = nfs_open_local_fh(&clp->cl_uuid, clp->cl_rpcclient, - cred, fh, mode); + cred, fh, nfl, mode); if (IS_ERR(localio)) { - status = PTR_ERR(localio); + int status = PTR_ERR(localio); trace_nfs_local_open_fh(fh, mode, status); switch (status) { case -ENOMEM: @@ -240,10 +242,59 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, /* Revalidate localio, will disable if unsupported */ nfs_local_probe(clp); } - return NULL; } return localio; } + +/* + * nfs_local_open_fh - open a local filehandle in terms of nfsd_file. + * First checking if the open nfsd_file is already cached, otherwise + * must __nfs_local_open_fh and insert the nfsd_file in nfs_file_localio. + * + * Returns a pointer to a struct nfsd_file or NULL. + */ +struct nfsd_file * +nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred, + struct nfs_fh *fh, struct nfs_file_localio *nfl, + const fmode_t mode) +{ + struct nfsd_file *nf, *new, __rcu **pnf; + + if (!nfs_server_is_local(clp)) + return NULL; + if (mode & ~(FMODE_READ | FMODE_WRITE)) + return NULL; + + if (mode & FMODE_WRITE) + pnf = &nfl->rw_file; + else + pnf = &nfl->ro_file; + + new = NULL; + rcu_read_lock(); + nf = rcu_dereference(*pnf); + if (!nf) { + rcu_read_unlock(); + new = __nfs_local_open_fh(clp, cred, fh, nfl, mode); + if (IS_ERR(new)) + return NULL; + /* try to swap in the pointer */ + spin_lock(&clp->cl_uuid.lock); + nf = rcu_dereference_protected(*pnf, 1); + if (!nf) { + nf = new; + new = NULL; + rcu_assign_pointer(*pnf, nf); + } + spin_unlock(&clp->cl_uuid.lock); + rcu_read_lock(); + } + nf = nfs_local_file_get(nf); + rcu_read_unlock(); + if (new) + nfs_to_nfsd_file_put_local(new); + return nf; +} EXPORT_SYMBOL_GPL(nfs_local_open_fh); static struct bio_vec * @@ -285,10 +336,19 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr, kfree(iocb); return NULL; } - init_sync_kiocb(&iocb->kiocb, file); + + if (localio_O_DIRECT_semantics && + test_bit(NFS_IOHDR_ODIRECT, &hdr->flags)) { + iocb->kiocb.ki_filp = file; + iocb->kiocb.ki_flags = IOCB_DIRECT; + } else + init_sync_kiocb(&iocb->kiocb, file); + iocb->kiocb.ki_pos = hdr->args.offset; iocb->hdr = hdr; iocb->kiocb.ki_flags &= ~IOCB_APPEND; + iocb->aio_complete_work = NULL; + return iocb; } @@ -328,7 +388,7 @@ nfs_local_pgio_done(struct nfs_pgio_header *hdr, long status) hdr->res.op_status = NFS4_OK; hdr->task.tk_status = 0; } else { - hdr->res.op_status = nfs4_stat_to_errno(status); + hdr->res.op_status = nfs_localio_errno_to_nfs4_stat(status); hdr->task.tk_status = status; } } @@ -338,11 +398,23 @@ nfs_local_pgio_release(struct nfs_local_kiocb *iocb) { struct nfs_pgio_header *hdr = iocb->hdr; - nfs_to_nfsd_file_put_local(iocb->localio); + nfs_local_file_put(iocb->localio); nfs_local_iocb_free(iocb); nfs_local_hdr_release(hdr, hdr->task.tk_ops); } +/* + * Complete the I/O from iocb->kiocb.ki_complete() + * + * Note that this function can be called from a bottom half context, + * hence we need to queue the rpc_call_done() etc to a workqueue + */ +static inline void nfs_local_pgio_aio_complete(struct nfs_local_kiocb *iocb) +{ + 
INIT_WORK(&iocb->work, iocb->aio_complete_work); + queue_work(nfsiod_workqueue, &iocb->work); +} + static void nfs_local_read_done(struct nfs_local_kiocb *iocb, long status) { @@ -365,6 +437,23 @@ nfs_local_read_done(struct nfs_local_kiocb *iocb, long status) status > 0 ? status : 0, hdr->res.eof); } +static void nfs_local_read_aio_complete_work(struct work_struct *work) +{ + struct nfs_local_kiocb *iocb = + container_of(work, struct nfs_local_kiocb, work); + + nfs_local_pgio_release(iocb); +} + +static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret) +{ + struct nfs_local_kiocb *iocb = + container_of(kiocb, struct nfs_local_kiocb, kiocb); + + nfs_local_read_done(iocb, ret); + nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */ +} + static void nfs_local_call_read(struct work_struct *work) { struct nfs_local_kiocb *iocb = @@ -379,10 +468,10 @@ static void nfs_local_call_read(struct work_struct *work) nfs_local_iter_init(&iter, iocb, READ); status = filp->f_op->read_iter(&iocb->kiocb, &iter); - WARN_ON_ONCE(status == -EIOCBQUEUED); - - nfs_local_read_done(iocb, status); - nfs_local_pgio_release(iocb); + if (status != -EIOCBQUEUED) { + nfs_local_read_done(iocb, status); + nfs_local_pgio_release(iocb); + } revert_creds(save_cred); } @@ -410,6 +499,11 @@ nfs_do_local_read(struct nfs_pgio_header *hdr, nfs_local_pgio_init(hdr, call_ops); hdr->res.eof = false; + if (iocb->kiocb.ki_flags & IOCB_DIRECT) { + iocb->kiocb.ki_complete = nfs_local_read_aio_complete; + iocb->aio_complete_work = nfs_local_read_aio_complete_work; + } + INIT_WORK(&iocb->work, nfs_local_call_read); queue_work(nfslocaliod_workqueue, &iocb->work); @@ -534,6 +628,24 @@ nfs_local_write_done(struct nfs_local_kiocb *iocb, long status) nfs_local_pgio_done(hdr, status); } +static void nfs_local_write_aio_complete_work(struct work_struct *work) +{ + struct nfs_local_kiocb *iocb = + container_of(work, struct nfs_local_kiocb, work); + + nfs_local_vfs_getattr(iocb); + nfs_local_pgio_release(iocb); +} + +static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret) +{ + struct nfs_local_kiocb *iocb = + container_of(kiocb, struct nfs_local_kiocb, kiocb); + + nfs_local_write_done(iocb, ret); + nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */ +} + static void nfs_local_call_write(struct work_struct *work) { struct nfs_local_kiocb *iocb = @@ -552,11 +664,11 @@ static void nfs_local_call_write(struct work_struct *work) file_start_write(filp); status = filp->f_op->write_iter(&iocb->kiocb, &iter); file_end_write(filp); - WARN_ON_ONCE(status == -EIOCBQUEUED); - - nfs_local_write_done(iocb, status); - nfs_local_vfs_getattr(iocb); - nfs_local_pgio_release(iocb); + if (status != -EIOCBQUEUED) { + nfs_local_write_done(iocb, status); + nfs_local_vfs_getattr(iocb); + nfs_local_pgio_release(iocb); + } revert_creds(save_cred); current->flags = old_flags; @@ -592,10 +704,16 @@ nfs_do_local_write(struct nfs_pgio_header *hdr, case NFS_FILE_SYNC: iocb->kiocb.ki_flags |= IOCB_DSYNC|IOCB_SYNC; } + nfs_local_pgio_init(hdr, call_ops); nfs_set_local_verifier(hdr->inode, hdr->res.verf, hdr->args.stable); + if (iocb->kiocb.ki_flags & IOCB_DIRECT) { + iocb->kiocb.ki_complete = nfs_local_write_aio_complete; + iocb->aio_complete_work = nfs_local_write_aio_complete_work; + } + INIT_WORK(&iocb->work, nfs_local_call_write); queue_work(nfslocaliod_workqueue, &iocb->work); @@ -626,8 +744,8 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio, if (status != 0) { if 
(status == -EAGAIN) - nfs_local_disable(clp); - nfs_to_nfsd_file_put_local(localio); + nfs_localio_disable_client(clp); + nfs_local_file_put(localio); hdr->task.tk_status = status; nfs_local_hdr_release(hdr, call_ops); } @@ -668,7 +786,7 @@ nfs_local_commit_done(struct nfs_commit_data *data, int status) data->task.tk_status = 0; } else { nfs_reset_boot_verifier(data->inode); - data->res.op_status = nfs4_stat_to_errno(status); + data->res.op_status = nfs_localio_errno_to_nfs4_stat(status); data->task.tk_status = status; } } @@ -678,7 +796,7 @@ nfs_local_release_commit_data(struct nfsd_file *localio, struct nfs_commit_data *data, const struct rpc_call_ops *call_ops) { - nfs_to_nfsd_file_put_local(localio); + nfs_local_file_put(localio); call_ops->rpc_call_done(&data->task, data); call_ops->rpc_release(data); } diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 2d53574da605..973aed9cc5fe 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -308,7 +308,7 @@ int nfs_submount(struct fs_context *fc, struct nfs_server *server) int err; /* Look it up again to get its attributes */ - err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry, + err = server->nfs_client->rpc_ops->lookup(d_inode(parent), dentry, &dentry->d_name, ctx->mntfh, ctx->clone_data.fattr); dput(parent); if (err != 0) diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 1566163c6d85..0c3bc98cd999 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -192,7 +192,7 @@ __nfs3_proc_lookup(struct inode *dir, const char *name, size_t len, } static int -nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, +nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { unsigned short task_flags = 0; @@ -202,8 +202,7 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry, task_flags |= RPC_TASK_TIMEOUT; dprintk("NFS call lookup %pd2\n", dentry); - return __nfs3_proc_lookup(dir, dentry->d_name.name, - dentry->d_name.len, fhandle, fattr, + return __nfs3_proc_lookup(dir, name->name, name->len, fhandle, fattr, task_flags); } @@ -844,6 +843,41 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, return status; } +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + +static unsigned nfs3_localio_probe_throttle __read_mostly = 0; +module_param(nfs3_localio_probe_throttle, uint, 0644); +MODULE_PARM_DESC(nfs3_localio_probe_throttle, + "Probe for NFSv3 LOCALIO every N IO requests. Must be power-of-2, defaults to 0 (probing disabled)."); + +static void nfs3_localio_probe(struct nfs_server *server) +{ + struct nfs_client *clp = server->nfs_client; + + /* Throttled to reduce nfs_local_probe_async() frequency */ + if (!nfs3_localio_probe_throttle || nfs_server_is_local(clp)) + return; + + /* + * Try (re)enabling LOCALIO if isn't enabled -- admin deems + * it worthwhile to periodically check if LOCALIO possible by + * setting the 'nfs3_localio_probe_throttle' module parameter. + * + * This is useful if LOCALIO was previously enabled, but was + * disabled due to server restart, and IO has successfully + * completed in terms of normal RPC. 
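The probe test just below relies on the usual power-of-two trick: for a power-of-two N, (count & (N - 1)) equals (count % N), so the condition fires once per N completed requests and otherwise costs only an increment and a mask. A standalone restatement of that arithmetic (hypothetical helper, not part of the patch):

static bool throttled_probe_due(unsigned int *counter, unsigned int every)
{
	if (!every)		/* 0 means probing stays disabled */
		return false;
	/* Fires on the first call and then once every "every" calls. */
	return ((*counter)++ & (every - 1)) == 0;
}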
+ */ + if ((clp->cl_uuid.nfs3_localio_probe_count++ & + (nfs3_localio_probe_throttle - 1)) == 0) { + if (!nfs_server_is_local(clp)) + nfs_local_probe_async(clp); + } +} + +#else +static void nfs3_localio_probe(struct nfs_server *server) {} +#endif + static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) { struct inode *inode = hdr->inode; @@ -855,8 +889,11 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; - if (task->tk_status >= 0 && !server->read_hdrsize) - cmpxchg(&server->read_hdrsize, 0, hdr->res.replen); + if (task->tk_status >= 0) { + if (!server->read_hdrsize) + cmpxchg(&server->read_hdrsize, 0, hdr->res.replen); + nfs3_localio_probe(server); + } nfs_invalidate_atime(inode); nfs_refresh_inode(inode, &hdr->fattr); @@ -886,8 +923,10 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) if (nfs3_async_handle_jukebox(task, inode)) return -EAGAIN; - if (task->tk_status >= 0) + if (task->tk_status >= 0) { nfs_writeback_update_inode(hdr); + nfs3_localio_probe(NFS_SERVER(inode)); + } return 0; } diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 531c9c20ef1d..1924c4a2077b 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -498,15 +498,15 @@ out_put_src_lock: return err; } -struct nfs42_offloadcancel_data { +struct nfs42_offload_data { struct nfs_server *seq_server; struct nfs42_offload_status_args args; struct nfs42_offload_status_res res; }; -static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) +static void nfs42_offload_prepare(struct rpc_task *task, void *calldata) { - struct nfs42_offloadcancel_data *data = calldata; + struct nfs42_offload_data *data = calldata; nfs4_setup_sequence(data->seq_server->nfs_client, &data->args.osa_seq_args, @@ -515,7 +515,7 @@ static void nfs42_offload_cancel_prepare(struct rpc_task *task, void *calldata) static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) { - struct nfs42_offloadcancel_data *data = calldata; + struct nfs42_offload_data *data = calldata; trace_nfs4_offload_cancel(&data->args, task->tk_status); nfs41_sequence_done(task, &data->res.osr_seq_res); @@ -525,22 +525,22 @@ static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata) rpc_restart_call_prepare(task); } -static void nfs42_free_offloadcancel_data(void *data) +static void nfs42_offload_release(void *data) { kfree(data); } static const struct rpc_call_ops nfs42_offload_cancel_ops = { - .rpc_call_prepare = nfs42_offload_cancel_prepare, + .rpc_call_prepare = nfs42_offload_prepare, .rpc_call_done = nfs42_offload_cancel_done, - .rpc_release = nfs42_free_offloadcancel_data, + .rpc_release = nfs42_offload_release, }; static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *stateid) { struct nfs_server *dst_server = NFS_SERVER(file_inode(dst)); - struct nfs42_offloadcancel_data *data = NULL; + struct nfs42_offload_data *data = NULL; struct nfs_open_context *ctx = nfs_file_open_context(dst); struct rpc_task *task; struct rpc_message msg = { @@ -552,14 +552,14 @@ static int nfs42_do_offload_cancel_async(struct file *dst, .rpc_message = &msg, .callback_ops = &nfs42_offload_cancel_ops, .workqueue = nfsiod_workqueue, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; int status; if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL)) return -EOPNOTSUPP; - data = kzalloc(sizeof(struct nfs42_offloadcancel_data), GFP_KERNEL); + data = 
kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL); if (data == NULL) return -ENOMEM; @@ -861,7 +861,7 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server, .rpc_message = &msg, .callback_ops = &nfs42_layoutstat_ops, .callback_data = data, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; struct rpc_task *task; @@ -1016,7 +1016,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg, struct rpc_task_setup task_setup = { .rpc_message = &msg, .callback_ops = &nfs42_layouterror_ops, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; unsigned int i; diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 9e3ae53e2205..5072d7ea72e9 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -144,9 +144,11 @@ decode_putfh_maxsz + \ decode_offload_cancel_maxsz) #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_copy_notify_maxsz) #define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_copy_notify_maxsz) #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ @@ -549,7 +551,7 @@ static void nfs4_xdr_enc_copy(struct rpc_rqst *req, } /* - * Encode OFFLOAD_CANEL request + * Encode OFFLOAD_CANCEL request */ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req, struct xdr_stream *xdr, diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 405f17e6e0b4..df9669d4ded7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -114,6 +114,7 @@ static inline struct nfs4_label * nfs4_label_init_security(struct inode *dir, struct dentry *dentry, struct iattr *sattr, struct nfs4_label *label) { + struct lsm_context shim; int err; if (label == NULL) @@ -128,18 +129,25 @@ nfs4_label_init_security(struct inode *dir, struct dentry *dentry, label->label = NULL; err = security_dentry_init_security(dentry, sattr->ia_mode, - &dentry->d_name, NULL, - (void **)&label->label, &label->len); - if (err == 0) - return label; + &dentry->d_name, NULL, &shim); + if (err) + return NULL; - return NULL; + label->label = shim.context; + label->len = shim.len; + return label; } static inline void nfs4_label_release_security(struct nfs4_label *label) { - if (label) - security_release_secctx(label->label, label->len); + struct lsm_context shim; + + if (label) { + shim.context = label->label; + shim.len = label->len; + shim.id = LSM_ID_UNDEF; + security_release_secctx(&shim); + } } static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label) { @@ -4536,15 +4544,15 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, } static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, - struct dentry *dentry, struct nfs_fh *fhandle, - struct nfs_fattr *fattr) + struct dentry *dentry, const struct qstr *name, + struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs_server *server = NFS_SERVER(dir); int status; struct nfs4_lookup_arg args = { .bitmask = server->attr_bitmask, .dir_fh = NFS_FH(dir), - .name = &dentry->d_name, + .name = name, }; struct nfs4_lookup_res res = { .server = server, @@ -4586,17 +4594,16 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) } static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, - struct dentry *dentry, struct nfs_fh *fhandle, - struct nfs_fattr *fattr) + struct dentry *dentry, const struct qstr *name, + struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs4_exception exception = 
{ .interruptible = true, }; struct rpc_clnt *client = *clnt; - const struct qstr *name = &dentry->d_name; int err; do { - err = _nfs4_proc_lookup(client, dir, dentry, fhandle, fattr); + err = _nfs4_proc_lookup(client, dir, dentry, name, fhandle, fattr); trace_nfs4_lookup(dir, name, err); switch (err) { case -NFS4ERR_BADNAME: @@ -4631,13 +4638,13 @@ out: return err; } -static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, +static int nfs4_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { int status; struct rpc_clnt *client = NFS_CLIENT(dir); - status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr); + status = nfs4_proc_lookup_common(&client, dir, dentry, name, fhandle, fattr); if (client != NFS_CLIENT(dir)) { rpc_shutdown_client(client); nfs_fixup_secinfo_attributes(fattr); @@ -4652,7 +4659,8 @@ nfs4_proc_lookup_mountpoint(struct inode *dir, struct dentry *dentry, struct rpc_clnt *client = NFS_CLIENT(dir); int status; - status = nfs4_proc_lookup_common(&client, dir, dentry, fhandle, fattr); + status = nfs4_proc_lookup_common(&client, dir, dentry, &dentry->d_name, + fhandle, fattr); if (status < 0) return ERR_PTR(status); return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 9a9f60a2291b..542cdf71229f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1955,6 +1955,7 @@ restart: } rcu_read_unlock(); nfs4_free_state_owners(&freeme); + nfs_local_probe_async(clp); if (lost_locks) pr_warn("NFS: %s: lost %d locks\n", clp->cl_hostname, lost_locks); diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c index 886a7c4c60b3..d1a92d8f8ba4 100644 --- a/fs/nfs/nfs4sysctl.c +++ b/fs/nfs/nfs4sysctl.c @@ -17,7 +17,7 @@ static const int nfs_set_port_min; static const int nfs_set_port_max = 65535; static struct ctl_table_header *nfs4_callback_sysctl_table; -static struct ctl_table nfs4_cb_sysctls[] = { +static const struct ctl_table nfs4_cb_sysctls[] = { { .procname = "nfs_callback_tcpport", .data = &nfs_callback_set_tcpport, diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 1eab98c277fa..7a058bd8c566 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -1714,38 +1714,6 @@ TRACE_EVENT(nfs_local_open_fh, ) ); -DECLARE_EVENT_CLASS(nfs_local_client_event, - TP_PROTO( - const struct nfs_client *clp - ), - - TP_ARGS(clp), - - TP_STRUCT__entry( - __field(unsigned int, protocol) - __string(server, clp->cl_hostname) - ), - - TP_fast_assign( - __entry->protocol = clp->rpc_ops->version; - __assign_str(server); - ), - - TP_printk( - "server=%s NFSv%u", __get_str(server), __entry->protocol - ) -); - -#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \ - DEFINE_EVENT(nfs_local_client_event, name, \ - TP_PROTO( \ - const struct nfs_client *clp \ - ), \ - TP_ARGS(clp)) - -DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_enable); -DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_local_disable); - DECLARE_EVENT_CLASS(nfs_xdr_event, TP_PROTO( const struct xdr_stream *xdr, diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index e27c07bd8929..11968dcb7243 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -961,8 +961,9 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) struct nfs_client *clp = NFS_SERVER(hdr->inode)->nfs_client; struct nfsd_file *localio = - nfs_local_open_fh(clp, hdr->cred, - hdr->args.fh, hdr->args.context->mode); + nfs_local_open_fh(clp, hdr->cred, hdr->args.fh, + &hdr->args.context->nfl, + 
hdr->args.context->mode); if (NFS_SERVER(hdr->inode)->nfs_client->cl_minorversion) task_flags = RPC_TASK_MOVEABLE; diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 6c09cd090c34..77920a2e3cef 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -153,13 +153,13 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, } static int -nfs_proc_lookup(struct inode *dir, struct dentry *dentry, +nfs_proc_lookup(struct inode *dir, struct dentry *dentry, const struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { struct nfs_diropargs arg = { .fh = NFS_FH(dir), - .name = dentry->d_name.name, - .len = dentry->d_name.len + .name = name->name, + .len = name->len }; struct nfs_diropok res = { .fh = fhandle, diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index e645be1a3381..f579df0e8d67 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -14,7 +14,7 @@ static struct ctl_table_header *nfs_callback_sysctl_table; -static struct ctl_table nfs_cb_sysctls[] = { +static const struct ctl_table nfs_cb_sysctls[] = { { .procname = "nfs_mountpoint_timeout", .data = &nfs_mountpoint_expiry_timeout, diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c index bf378ecd5d9f..7b59a40d40c0 100644 --- a/fs/nfs/sysfs.c +++ b/fs/nfs/sysfs.c @@ -280,9 +280,9 @@ void nfs_sysfs_link_rpc_client(struct nfs_server *server, char name[RPC_CLIENT_NAME_SIZE]; int ret; - strcpy(name, clnt->cl_program->name); - strcat(name, uniq ? uniq : ""); - strcat(name, "_client"); + strscpy(name, clnt->cl_program->name, sizeof(name)); + strncat(name, uniq ? uniq : "", sizeof(name) - strlen(name) - 1); + strncat(name, "_client", sizeof(name) - strlen(name) - 1); ret = sysfs_create_link_nowarn(&server->kobj, &clnt->cl_sysfs->kobject, name); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 50fa539611f5..aa3d8bea3ec0 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1826,7 +1826,8 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how, task_flags = RPC_TASK_MOVEABLE; localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred, - data->args.fh, data->context->mode); + data->args.fh, &data->context->nfl, + data->context->mode); return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), data->mds_ops, how, RPC_TASK_CRED_NOREF | task_flags, localio); diff --git a/fs/nfs_common/Makefile b/fs/nfs_common/Makefile index a5e54809701e..c10ead273ff2 100644 --- a/fs/nfs_common/Makefile +++ b/fs/nfs_common/Makefile @@ -6,8 +6,9 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o nfs_acl-objs := nfsacl.o +CFLAGS_localio_trace.o += -I$(src) obj-$(CONFIG_NFS_COMMON_LOCALIO_SUPPORT) += nfs_localio.o -nfs_localio-objs := nfslocalio.o +nfs_localio-objs := nfslocalio.o localio_trace.o obj-$(CONFIG_GRACE_PERIOD) += grace.o obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o diff --git a/fs/nfs_common/common.c b/fs/nfs_common/common.c index 34a115176f97..af09aed09fd2 100644 --- a/fs/nfs_common/common.c +++ b/fs/nfs_common/common.c @@ -15,7 +15,7 @@ static const struct { { NFS_OK, 0 }, { NFSERR_PERM, -EPERM }, { NFSERR_NOENT, -ENOENT }, - { NFSERR_IO, -errno_NFSERR_IO}, + { NFSERR_IO, -EIO }, { NFSERR_NXIO, -ENXIO }, /* { NFSERR_EAGAIN, -EAGAIN }, */ { NFSERR_ACCES, -EACCES }, @@ -45,7 +45,6 @@ static const struct { { NFSERR_SERVERFAULT, -EREMOTEIO }, { NFSERR_BADTYPE, -EBADTYPE }, { NFSERR_JUKEBOX, -EJUKEBOX }, - { -1, -EIO } }; /** @@ -59,26 +58,29 @@ int nfs_stat_to_errno(enum nfs_stat status) { int i; - for (i = 0; nfs_errtbl[i].stat != -1; i++) { + for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) { if (nfs_errtbl[i].stat == 
(int)status) return nfs_errtbl[i].errno; } - return nfs_errtbl[i].errno; + return -EIO; } EXPORT_SYMBOL_GPL(nfs_stat_to_errno); /* * We need to translate between nfs v4 status return values and * the local errno values which may not be the same. + * + * nfs4_errtbl_common[] is used before more specialized mappings + * available in nfs4_errtbl[] or nfs4_errtbl_localio[]. */ static const struct { int stat; int errno; -} nfs4_errtbl[] = { +} nfs4_errtbl_common[] = { { NFS4_OK, 0 }, { NFS4ERR_PERM, -EPERM }, { NFS4ERR_NOENT, -ENOENT }, - { NFS4ERR_IO, -errno_NFSERR_IO}, + { NFS4ERR_IO, -EIO }, { NFS4ERR_NXIO, -ENXIO }, { NFS4ERR_ACCESS, -EACCES }, { NFS4ERR_EXIST, -EEXIST }, @@ -98,15 +100,20 @@ static const struct { { NFS4ERR_BAD_COOKIE, -EBADCOOKIE }, { NFS4ERR_NOTSUPP, -ENOTSUPP }, { NFS4ERR_TOOSMALL, -ETOOSMALL }, - { NFS4ERR_SERVERFAULT, -EREMOTEIO }, { NFS4ERR_BADTYPE, -EBADTYPE }, - { NFS4ERR_LOCKED, -EAGAIN }, { NFS4ERR_SYMLINK, -ELOOP }, - { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, +}; + +static const struct { + int stat; + int errno; +} nfs4_errtbl[] = { + { NFS4ERR_SERVERFAULT, -EREMOTEIO }, + { NFS4ERR_LOCKED, -EAGAIN }, + { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_NOXATTR, -ENODATA }, { NFS4ERR_XATTR2BIG, -E2BIG }, - { -1, -EIO } }; /* @@ -116,7 +123,14 @@ static const struct { int nfs4_stat_to_errno(int stat) { int i; - for (i = 0; nfs4_errtbl[i].stat != -1; i++) { + + /* First check nfs4_errtbl_common */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) { + if (nfs4_errtbl_common[i].stat == stat) + return nfs4_errtbl_common[i].errno; + } + /* Then check nfs4_errtbl */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl); i++) { if (nfs4_errtbl[i].stat == stat) return nfs4_errtbl[i].errno; } @@ -132,3 +146,56 @@ int nfs4_stat_to_errno(int stat) return -stat; } EXPORT_SYMBOL_GPL(nfs4_stat_to_errno); + +/* + * This table is useful for conversion from local errno to NFS error. + * It provides more logically correct mappings for use with LOCALIO + * (which is focused on converting from errno to NFS status). + */ +static const struct { + int stat; + int errno; +} nfs4_errtbl_localio[] = { + /* Map errors differently than nfs4_errtbl */ + { NFS4ERR_IO, -EREMOTEIO }, + { NFS4ERR_DELAY, -EAGAIN }, + { NFS4ERR_FBIG, -E2BIG }, + /* Map errors not handled by nfs4_errtbl */ + { NFS4ERR_STALE, -EBADF }, + { NFS4ERR_STALE, -EOPENSTALE }, + { NFS4ERR_DELAY, -ETIMEDOUT }, + { NFS4ERR_DELAY, -ERESTARTSYS }, + { NFS4ERR_DELAY, -ENOMEM }, + { NFS4ERR_IO, -ETXTBSY }, + { NFS4ERR_IO, -EBUSY }, + { NFS4ERR_SERVERFAULT, -ESERVERFAULT }, + { NFS4ERR_SERVERFAULT, -ENFILE }, + { NFS4ERR_IO, -EUCLEAN }, + { NFS4ERR_PERM, -ENOKEY }, +}; + +/* + * Convert an errno to an NFS error code for LOCALIO. + */ +__u32 nfs_localio_errno_to_nfs4_stat(int errno) +{ + int i; + + /* First check nfs4_errtbl_common */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_common); i++) { + if (nfs4_errtbl_common[i].errno == errno) + return nfs4_errtbl_common[i].stat; + } + /* Then check nfs4_errtbl_localio */ + for (i = 0; i < ARRAY_SIZE(nfs4_errtbl_localio); i++) { + if (nfs4_errtbl_localio[i].errno == errno) + return nfs4_errtbl_localio[i].stat; + } + /* If we cannot translate the error, the recovery routines should + * handle it. + * Note: remaining NFSv4 error codes have values > 10000, so should + * not conflict with native Linux error codes. 
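To make the lookup order of the new helper concrete, a few translations that follow from the tables above (nfs4_errtbl_common is consulted first, then nfs4_errtbl_localio, then the fallback; the -ENOTTY case is simply an example of an unmapped errno):

/*
 *	nfs_localio_errno_to_nfs4_stat(-EIO)    -> NFS4ERR_IO          (common table)
 *	nfs_localio_errno_to_nfs4_stat(-EAGAIN) -> NFS4ERR_DELAY       (localio table)
 *	nfs_localio_errno_to_nfs4_stat(-ENOMEM) -> NFS4ERR_DELAY       (localio table)
 *	nfs_localio_errno_to_nfs4_stat(-ENOTTY) -> NFS4ERR_SERVERFAULT (fallback)
 */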
+ */ + return NFS4ERR_SERVERFAULT; +} +EXPORT_SYMBOL_GPL(nfs_localio_errno_to_nfs4_stat); diff --git a/fs/nfs_common/localio_trace.c b/fs/nfs_common/localio_trace.c new file mode 100644 index 000000000000..7decfe57abeb --- /dev/null +++ b/fs/nfs_common/localio_trace.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com> + * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com> + */ +#include <linux/nfs_fs.h> +#include <linux/namei.h> + +#define CREATE_TRACE_POINTS +#include "localio_trace.h" diff --git a/fs/nfs_common/localio_trace.h b/fs/nfs_common/localio_trace.h new file mode 100644 index 000000000000..4055aec9ff8d --- /dev/null +++ b/fs/nfs_common/localio_trace.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Trond Myklebust <trond.myklebust@hammerspace.com> + * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com> + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM nfs_localio + +#if !defined(_TRACE_NFS_COMMON_LOCALIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_NFS_COMMON_LOCALIO_H + +#include <linux/tracepoint.h> + +#include <trace/misc/fs.h> +#include <trace/misc/nfs.h> +#include <trace/misc/sunrpc.h> + +DECLARE_EVENT_CLASS(nfs_local_client_event, + TP_PROTO( + const struct nfs_client *clp + ), + + TP_ARGS(clp), + + TP_STRUCT__entry( + __field(unsigned int, protocol) + __string(server, clp->cl_hostname) + ), + + TP_fast_assign( + __entry->protocol = clp->rpc_ops->version; + __assign_str(server); + ), + + TP_printk( + "server=%s NFSv%u", __get_str(server), __entry->protocol + ) +); + +#define DEFINE_NFS_LOCAL_CLIENT_EVENT(name) \ + DEFINE_EVENT(nfs_local_client_event, name, \ + TP_PROTO( \ + const struct nfs_client *clp \ + ), \ + TP_ARGS(clp)) + +DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_enable_client); +DEFINE_NFS_LOCAL_CLIENT_EVENT(nfs_localio_disable_client); + +#endif /* _TRACE_NFS_COMMON_LOCALIO_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE localio_trace +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/fs/nfs_common/nfslocalio.c b/fs/nfs_common/nfslocalio.c index a74ec08f6c96..6a0bdea6d644 100644 --- a/fs/nfs_common/nfslocalio.c +++ b/fs/nfs_common/nfslocalio.c @@ -7,38 +7,67 @@ #include <linux/module.h> #include <linux/list.h> #include <linux/nfslocalio.h> +#include <linux/nfs3.h> +#include <linux/nfs4.h> +#include <linux/nfs_fs.h> #include <net/netns/generic.h> +#include "localio_trace.h" + MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFS localio protocol bypass support"); -static DEFINE_SPINLOCK(nfs_uuid_lock); +static DEFINE_SPINLOCK(nfs_uuids_lock); /* * Global list of nfs_uuid_t instances - * that is protected by nfs_uuid_lock. + * that is protected by nfs_uuids_lock. */ static LIST_HEAD(nfs_uuids); +/* + * Lock ordering: + * 1: nfs_uuid->lock + * 2: nfs_uuids_lock + * 3: nfs_uuid->list_lock (aka nn->local_clients_lock) + * + * May skip locks in select cases, but never hold multiple + * locks out of order. + */ + void nfs_uuid_init(nfs_uuid_t *nfs_uuid) { - nfs_uuid->net = NULL; + RCU_INIT_POINTER(nfs_uuid->net, NULL); nfs_uuid->dom = NULL; + nfs_uuid->list_lock = NULL; INIT_LIST_HEAD(&nfs_uuid->list); + INIT_LIST_HEAD(&nfs_uuid->files); + spin_lock_init(&nfs_uuid->lock); + nfs_uuid->nfs3_localio_probe_count = 0; } EXPORT_SYMBOL_GPL(nfs_uuid_init); bool nfs_uuid_begin(nfs_uuid_t *nfs_uuid) { - spin_lock(&nfs_uuid_lock); - /* Is this nfs_uuid already in use? 
*/ + spin_lock(&nfs_uuid->lock); + if (rcu_access_pointer(nfs_uuid->net)) { + /* This nfs_uuid is already in use */ + spin_unlock(&nfs_uuid->lock); + return false; + } + + spin_lock(&nfs_uuids_lock); if (!list_empty(&nfs_uuid->list)) { - spin_unlock(&nfs_uuid_lock); + /* This nfs_uuid is already in use */ + spin_unlock(&nfs_uuids_lock); + spin_unlock(&nfs_uuid->lock); return false; } - uuid_gen(&nfs_uuid->uuid); list_add_tail(&nfs_uuid->list, &nfs_uuids); - spin_unlock(&nfs_uuid_lock); + spin_unlock(&nfs_uuids_lock); + + uuid_gen(&nfs_uuid->uuid); + spin_unlock(&nfs_uuid->lock); return true; } @@ -46,12 +75,16 @@ EXPORT_SYMBOL_GPL(nfs_uuid_begin); void nfs_uuid_end(nfs_uuid_t *nfs_uuid) { - if (nfs_uuid->net == NULL) { - spin_lock(&nfs_uuid_lock); - if (nfs_uuid->net == NULL) + if (!rcu_access_pointer(nfs_uuid->net)) { + spin_lock(&nfs_uuid->lock); + if (!rcu_access_pointer(nfs_uuid->net)) { + /* Not local, remove from nfs_uuids */ + spin_lock(&nfs_uuids_lock); list_del_init(&nfs_uuid->list); - spin_unlock(&nfs_uuid_lock); - } + spin_unlock(&nfs_uuids_lock); + } + spin_unlock(&nfs_uuid->lock); + } } EXPORT_SYMBOL_GPL(nfs_uuid_end); @@ -69,68 +102,142 @@ static nfs_uuid_t * nfs_uuid_lookup_locked(const uuid_t *uuid) static struct module *nfsd_mod; void nfs_uuid_is_local(const uuid_t *uuid, struct list_head *list, - struct net *net, struct auth_domain *dom, - struct module *mod) + spinlock_t *list_lock, struct net *net, + struct auth_domain *dom, struct module *mod) { nfs_uuid_t *nfs_uuid; - spin_lock(&nfs_uuid_lock); + spin_lock(&nfs_uuids_lock); nfs_uuid = nfs_uuid_lookup_locked(uuid); - if (nfs_uuid) { - kref_get(&dom->ref); - nfs_uuid->dom = dom; - /* - * We don't hold a ref on the net, but instead put - * ourselves on a list so the net pointer can be - * invalidated. - */ - list_move(&nfs_uuid->list, list); - rcu_assign_pointer(nfs_uuid->net, net); - - __module_get(mod); - nfsd_mod = mod; + if (!nfs_uuid) { + spin_unlock(&nfs_uuids_lock); + return; } - spin_unlock(&nfs_uuid_lock); + + /* + * We don't hold a ref on the net, but instead put + * ourselves on @list (nn->local_clients) so the net + * pointer can be invalidated. + */ + spin_lock(list_lock); /* list_lock is nn->local_clients_lock */ + list_move(&nfs_uuid->list, list); + spin_unlock(list_lock); + + spin_unlock(&nfs_uuids_lock); + /* Once nfs_uuid is parented to @list, avoid global nfs_uuids_lock */ + spin_lock(&nfs_uuid->lock); + + __module_get(mod); + nfsd_mod = mod; + + nfs_uuid->list_lock = list_lock; + kref_get(&dom->ref); + nfs_uuid->dom = dom; + rcu_assign_pointer(nfs_uuid->net, net); + spin_unlock(&nfs_uuid->lock); } EXPORT_SYMBOL_GPL(nfs_uuid_is_local); -static void nfs_uuid_put_locked(nfs_uuid_t *nfs_uuid) +void nfs_localio_enable_client(struct nfs_client *clp) +{ + /* nfs_uuid_is_local() does the actual enablement */ + trace_nfs_localio_enable_client(clp); +} +EXPORT_SYMBOL_GPL(nfs_localio_enable_client); + +/* + * Cleanup the nfs_uuid_t embedded in an nfs_client. + * This is the long-form of nfs_uuid_init(). 
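nfs_uuid_put() below (and nfs_localio_invalidate_clients() further down) uses a common teardown idiom: splice the whole list onto a private head while holding the lock, then release the entries with the lock dropped so that per-entry cleanup may block. A generic sketch of that pattern (the item type and item_release() are hypothetical):

struct item {
	struct list_head list;
};
void item_release(struct item *it);	/* hypothetical; may sleep */

static void drain_items(struct list_head *head, spinlock_t *lock)
{
	LIST_HEAD(local);
	struct item *it, *tmp;

	spin_lock(lock);
	list_splice_init(head, &local);
	spin_unlock(lock);

	list_for_each_entry_safe(it, tmp, &local, list) {
		list_del_init(&it->list);
		item_release(it);
		cond_resched();
	}
}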
+ */ +static bool nfs_uuid_put(nfs_uuid_t *nfs_uuid) { - if (nfs_uuid->net) { - module_put(nfsd_mod); - nfs_uuid->net = NULL; + LIST_HEAD(local_files); + struct nfs_file_localio *nfl, *tmp; + + spin_lock(&nfs_uuid->lock); + if (unlikely(!rcu_access_pointer(nfs_uuid->net))) { + spin_unlock(&nfs_uuid->lock); + return false; } + RCU_INIT_POINTER(nfs_uuid->net, NULL); + if (nfs_uuid->dom) { auth_domain_put(nfs_uuid->dom); nfs_uuid->dom = NULL; } - list_del_init(&nfs_uuid->list); + + list_splice_init(&nfs_uuid->files, &local_files); + spin_unlock(&nfs_uuid->lock); + + /* Walk list of files and ensure their last references dropped */ + list_for_each_entry_safe(nfl, tmp, &local_files, list) { + nfs_close_local_fh(nfl); + cond_resched(); + } + + spin_lock(&nfs_uuid->lock); + BUG_ON(!list_empty(&nfs_uuid->files)); + + /* Remove client from nn->local_clients */ + if (nfs_uuid->list_lock) { + spin_lock(nfs_uuid->list_lock); + BUG_ON(list_empty(&nfs_uuid->list)); + list_del_init(&nfs_uuid->list); + spin_unlock(nfs_uuid->list_lock); + nfs_uuid->list_lock = NULL; + } + + module_put(nfsd_mod); + spin_unlock(&nfs_uuid->lock); + + return true; } -void nfs_uuid_invalidate_clients(struct list_head *list) +void nfs_localio_disable_client(struct nfs_client *clp) { + if (nfs_uuid_put(&clp->cl_uuid)) + trace_nfs_localio_disable_client(clp); +} +EXPORT_SYMBOL_GPL(nfs_localio_disable_client); + +void nfs_localio_invalidate_clients(struct list_head *nn_local_clients, + spinlock_t *nn_local_clients_lock) +{ + LIST_HEAD(local_clients); nfs_uuid_t *nfs_uuid, *tmp; + struct nfs_client *clp; - spin_lock(&nfs_uuid_lock); - list_for_each_entry_safe(nfs_uuid, tmp, list, list) - nfs_uuid_put_locked(nfs_uuid); - spin_unlock(&nfs_uuid_lock); + spin_lock(nn_local_clients_lock); + list_splice_init(nn_local_clients, &local_clients); + spin_unlock(nn_local_clients_lock); + list_for_each_entry_safe(nfs_uuid, tmp, &local_clients, list) { + if (WARN_ON(nfs_uuid->list_lock != nn_local_clients_lock)) + break; + clp = container_of(nfs_uuid, struct nfs_client, cl_uuid); + nfs_localio_disable_client(clp); + } } -EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_clients); +EXPORT_SYMBOL_GPL(nfs_localio_invalidate_clients); -void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid) +static void nfs_uuid_add_file(nfs_uuid_t *nfs_uuid, struct nfs_file_localio *nfl) { - if (nfs_uuid->net) { - spin_lock(&nfs_uuid_lock); - nfs_uuid_put_locked(nfs_uuid); - spin_unlock(&nfs_uuid_lock); + /* Add nfl to nfs_uuid->files if it isn't already */ + spin_lock(&nfs_uuid->lock); + if (list_empty(&nfl->list)) { + rcu_assign_pointer(nfl->nfs_uuid, nfs_uuid); + list_add_tail(&nfl->list, &nfs_uuid->files); } + spin_unlock(&nfs_uuid->lock); } -EXPORT_SYMBOL_GPL(nfs_uuid_invalidate_one_client); +/* + * Caller is responsible for calling nfsd_net_put and + * nfsd_file_put (via nfs_to_nfsd_file_put_local). + */ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, struct rpc_clnt *rpc_clnt, const struct cred *cred, - const struct nfs_fh *nfs_fh, const fmode_t fmode) + const struct nfs_fh *nfs_fh, struct nfs_file_localio *nfl, + const fmode_t fmode) { struct net *net; struct nfsd_file *localio; @@ -139,7 +246,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, * Not running in nfsd context, so must safely get reference on nfsd_serv. * But the server may already be shutting down, if so disallow new localio. 
* uuid->net is NOT a counted reference, but rcu_read_lock() ensures that - * if uuid->net is not NULL, then calling nfsd_serv_try_get() is safe + * if uuid->net is not NULL, then calling nfsd_net_try_get() is safe * and if it succeeds we will have an implied reference to the net. * * Otherwise NFS may not have ref on NFSD and therefore cannot safely @@ -147,21 +254,62 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *uuid, */ rcu_read_lock(); net = rcu_dereference(uuid->net); - if (!net || !nfs_to->nfsd_serv_try_get(net)) { + if (!net || !nfs_to->nfsd_net_try_get(net)) { rcu_read_unlock(); return ERR_PTR(-ENXIO); } rcu_read_unlock(); - /* We have an implied reference to net thanks to nfsd_serv_try_get */ + /* We have an implied reference to net thanks to nfsd_net_try_get */ localio = nfs_to->nfsd_open_local_fh(net, uuid->dom, rpc_clnt, cred, nfs_fh, fmode); if (IS_ERR(localio)) nfs_to_nfsd_net_put(net); + else + nfs_uuid_add_file(uuid, nfl); return localio; } EXPORT_SYMBOL_GPL(nfs_open_local_fh); +void nfs_close_local_fh(struct nfs_file_localio *nfl) +{ + struct nfsd_file *ro_nf = NULL; + struct nfsd_file *rw_nf = NULL; + nfs_uuid_t *nfs_uuid; + + rcu_read_lock(); + nfs_uuid = rcu_dereference(nfl->nfs_uuid); + if (!nfs_uuid) { + /* regular (non-LOCALIO) NFS will hammer this */ + rcu_read_unlock(); + return; + } + + ro_nf = rcu_access_pointer(nfl->ro_file); + rw_nf = rcu_access_pointer(nfl->rw_file); + if (ro_nf || rw_nf) { + spin_lock(&nfs_uuid->lock); + if (ro_nf) + ro_nf = rcu_dereference_protected(xchg(&nfl->ro_file, NULL), 1); + if (rw_nf) + rw_nf = rcu_dereference_protected(xchg(&nfl->rw_file, NULL), 1); + + /* Remove nfl from nfs_uuid->files list */ + RCU_INIT_POINTER(nfl->nfs_uuid, NULL); + list_del_init(&nfl->list); + spin_unlock(&nfs_uuid->lock); + rcu_read_unlock(); + + if (ro_nf) + nfs_to_nfsd_file_put_local(ro_nf); + if (rw_nf) + nfs_to_nfsd_file_put_local(rw_nf); + return; + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(nfs_close_local_fh); + /* * The NFS LOCALIO code needs to call into NFSD using various symbols, * but cannot be statically linked, because that will make the NFS diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile index 18cbd3fa7691..2f687619f65b 100644 --- a/fs/nfsd/Makefile +++ b/fs/nfsd/Makefile @@ -18,9 +18,23 @@ nfsd-$(CONFIG_NFSD_V2) += nfsproc.o nfsxdr.o nfsd-$(CONFIG_NFSD_V2_ACL) += nfs2acl.o nfsd-$(CONFIG_NFSD_V3_ACL) += nfs3acl.o nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \ - nfs4acl.o nfs4callback.o nfs4recover.o + nfs4acl.o nfs4callback.o nfs4recover.o nfs4xdr_gen.o nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o nfsd-$(CONFIG_NFS_LOCALIO) += localio.o + + +.PHONY: xdrgen + +xdrgen: ../../include/linux/sunrpc/xdrgen/nfs4_1.h nfs4xdr_gen.h nfs4xdr_gen.c + +../../include/linux/sunrpc/xdrgen/nfs4_1.h: ../../Documentation/sunrpc/xdr/nfs4_1.x + ../../tools/net/sunrpc/xdrgen/xdrgen definitions $< > $@ + +nfs4xdr_gen.h: ../../Documentation/sunrpc/xdr/nfs4_1.x + ../../tools/net/sunrpc/xdrgen/xdrgen declarations $< > $@ + +nfs4xdr_gen.c: ../../Documentation/sunrpc/xdr/nfs4_1.x + ../../tools/net/sunrpc/xdrgen/xdrgen source $< > $@ diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 93e33d1ee891..4dc327e02456 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c @@ -27,7 +27,7 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp) 
int flags = nfsexp_flags(cred, exp); /* discard any old override before preparing the new set */ - revert_creds(get_cred(current_real_cred())); + put_cred(revert_creds(get_cred(current_real_cred()))); new = prepare_creds(); if (!new) return -ENOMEM; @@ -80,7 +80,6 @@ int nfsd_setuser(struct svc_cred *cred, struct svc_export *exp) new->cap_effective = cap_raise_nfsd_set(new->cap_effective, new->cap_permitted); put_cred(override_creds(new)); - put_cred(new); return 0; oom: diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index aa4712362b3b..0363720280d4 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -355,16 +355,25 @@ static void export_stats_destroy(struct export_stats *stats) EXP_STATS_COUNTERS_NUM); } -static void svc_export_put(struct kref *ref) +static void svc_export_release(struct rcu_head *rcu_head) { - struct svc_export *exp = container_of(ref, struct svc_export, h.ref); - path_put(&exp->ex_path); - auth_domain_put(exp->ex_client); + struct svc_export *exp = container_of(rcu_head, struct svc_export, + ex_rcu); + nfsd4_fslocs_free(&exp->ex_fslocs); export_stats_destroy(exp->ex_stats); kfree(exp->ex_stats); kfree(exp->ex_uuid); - kfree_rcu(exp, ex_rcu); + kfree(exp); +} + +static void svc_export_put(struct kref *ref) +{ + struct svc_export *exp = container_of(ref, struct svc_export, h.ref); + + path_put(&exp->ex_path); + auth_domain_put(exp->ex_client); + call_rcu(&exp->ex_rcu, svc_export_release); } static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h) @@ -1425,13 +1434,9 @@ static int e_show(struct seq_file *m, void *p) return 0; } - if (!cache_get_rcu(&exp->h)) - return 0; - - if (cache_check(cd, &exp->h, NULL)) + if (cache_check_rcu(cd, &exp->h, NULL)) return 0; - exp_put(exp); return svc_export_show(m, cd, cp); } diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index a1cdba42c4fa..fb9b1656a287 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -39,6 +39,7 @@ #include <linux/fsnotify.h> #include <linux/seq_file.h> #include <linux/rhashtable.h> +#include <linux/nfslocalio.h> #include "vfs.h" #include "nfsd.h" @@ -391,7 +392,7 @@ nfsd_file_put(struct nfsd_file *nf) } /** - * nfsd_file_put_local - put nfsd_file reference and arm nfsd_serv_put in caller + * nfsd_file_put_local - put nfsd_file reference and arm nfsd_net_put in caller * @nf: nfsd_file of which to put the reference * * First save the associated net to return to caller, then put @@ -445,11 +446,20 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose) struct nfsd_file, nf_gc); struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id); struct nfsd_fcache_disposal *l = nn->fcache_disposal; + struct svc_serv *serv; spin_lock(&l->lock); list_move_tail(&nf->nf_gc, &l->freeme); spin_unlock(&l->lock); - svc_wake_up(nn->nfsd_serv); + + /* + * The filecache laundrette is shut down after the + * nn->nfsd_serv pointer is cleared, but before the + * svc_serv is freed. + */ + serv = nn->nfsd_serv; + if (serv) + svc_wake_up(serv); } } @@ -833,6 +843,14 @@ __nfsd_file_cache_purge(struct net *net) struct nfsd_file *nf; LIST_HEAD(dispose); +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + if (net) { + struct nfsd_net *nn = net_generic(net, nfsd_net_id); + nfs_localio_invalidate_clients(&nn->local_clients, + &nn->local_clients_lock); + } +#endif + rhltable_walk_enter(&nfsd_file_rhltable, &iter); do { rhashtable_walk_start(&iter); @@ -1222,10 +1240,9 @@ nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp, * a file. 
The security implications of this should be carefully * considered before use. * - * The nfsd_file object returned by this API is reference-counted - * and garbage-collected. The object is retained for a few - * seconds after the final nfsd_file_put() in case the caller - * wants to re-use it. + * The nfsd_file_object returned by this API is reference-counted + * but not garbage-collected. The object is unhashed after the + * final nfsd_file_put(). * * Return values: * %nfs_ok - @pnf points to an nfsd_file with its reference @@ -1247,8 +1264,8 @@ nfsd_file_acquire_local(struct net *net, struct svc_cred *cred, __be32 beres; beres = nfsd_file_do_acquire(NULL, net, cred, client, - fhp, may_flags, NULL, pnf, true); - revert_creds(save_cred); + fhp, may_flags, NULL, pnf, false); + put_cred(revert_creds(save_cred)); return beres; } diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c index f441cb9f74d5..238647fa379e 100644 --- a/fs/nfsd/localio.c +++ b/fs/nfsd/localio.c @@ -25,10 +25,12 @@ #include "cache.h" static const struct nfsd_localio_operations nfsd_localio_ops = { - .nfsd_serv_try_get = nfsd_serv_try_get, - .nfsd_serv_put = nfsd_serv_put, + .nfsd_net_try_get = nfsd_net_try_get, + .nfsd_net_put = nfsd_net_put, .nfsd_open_local_fh = nfsd_open_local_fh, .nfsd_file_put_local = nfsd_file_put_local, + .nfsd_file_get = nfsd_file_get, + .nfsd_file_put = nfsd_file_put, .nfsd_file_file = nfsd_file_file, }; @@ -52,7 +54,7 @@ void nfsd_localio_ops_init(void) * avoid all the NFS overhead with reads, writes and commits. * * On successful return, returned nfsd_file will have its nf_net member - * set. Caller (NFS client) is responsible for calling nfsd_serv_put and + * set. Caller (NFS client) is responsible for calling nfsd_net_put and * nfsd_file_put (via nfs_to_nfsd_file_put_local). */ struct nfsd_file * @@ -114,6 +116,7 @@ static __be32 localio_proc_uuid_is_local(struct svc_rqst *rqstp) struct nfsd_net *nn = net_generic(net, nfsd_net_id); nfs_uuid_is_local(&argp->uuid, &nn->local_clients, + &nn->local_clients_lock, net, rqstp->rq_client, THIS_MODULE); return rpc_success; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 26f7b34d1a03..3e2d0fde80a7 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -128,21 +128,16 @@ struct nfsd_net { seqlock_t writeverf_lock; unsigned char writeverf[8]; - /* - * Max number of connections this nfsd container will allow. Defaults - * to '0' which is means that it bases this on the number of threads. 
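The credential hunks in this series (fs/nfsd/auth.c above, fs/nfsd/nfs4recover.c below) track an override_creds()/revert_creds() API change in which the helpers no longer take or drop references themselves, so callers pair them explicitly with get_cred()/put_cred(). A sketch of the resulting pattern, assuming that API behaviour:

static void run_with_creds(const struct cred *new_cred)
{
	const struct cred *old;

	/* Hold a reference on new_cred for as long as it is installed. */
	old = override_creds(get_cred(new_cred));

	/* ... work with new_cred as the subjective credentials ... */

	/* revert_creds() hands back the installed creds; drop our reference. */
	put_cred(revert_creds(old));
}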
- */ - unsigned int max_connections; - u32 clientid_base; u32 clientid_counter; u32 clverifier_counter; struct svc_info nfsd_info; #define nfsd_serv nfsd_info.serv - struct percpu_ref nfsd_serv_ref; - struct completion nfsd_serv_confirm_done; - struct completion nfsd_serv_free_done; + + struct percpu_ref nfsd_net_ref; + struct completion nfsd_net_confirm_done; + struct completion nfsd_net_free_done; /* * clientid and stateid data for construction of net unique COPY @@ -219,6 +214,7 @@ struct nfsd_net { #if IS_ENABLED(CONFIG_NFS_LOCALIO) /* Local clients to be invalidated when net is shut down */ + spinlock_t local_clients_lock; struct list_head local_clients; #endif }; @@ -229,8 +225,8 @@ struct nfsd_net { extern bool nfsd_support_version(int vers); extern unsigned int nfsd_net_id; -bool nfsd_serv_try_get(struct net *net); -void nfsd_serv_put(struct net *net); +bool nfsd_net_try_get(struct net *net); +void nfsd_net_put(struct net *net); void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn); void nfsd_reset_write_verifier(struct nfsd_net *nn); diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 4e3be7201b1c..5fb202acb0fd 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -84,6 +84,8 @@ out: fail: posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); + resp->acl_access = NULL; + resp->acl_default = NULL; goto out; } diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 5e34e98db969..7b5433bd3019 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -76,6 +76,8 @@ out: fail: posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); + resp->acl_access = NULL; + resp->acl_default = NULL; goto out; } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index c083e539e898..484077200c5d 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -42,6 +42,7 @@ #include "trace.h" #include "xdr4cb.h" #include "xdr4.h" +#include "nfs4xdr_gen.h" #define NFSDDBG_FACILITY NFSDDBG_PROC @@ -93,12 +94,35 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap, { fattr->ncf_cb_change = 0; fattr->ncf_cb_fsize = 0; + fattr->ncf_cb_atime.tv_sec = 0; + fattr->ncf_cb_atime.tv_nsec = 0; + fattr->ncf_cb_mtime.tv_sec = 0; + fattr->ncf_cb_mtime.tv_nsec = 0; + if (bitmap[0] & FATTR4_WORD0_CHANGE) if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0) return -NFSERR_BAD_XDR; if (bitmap[0] & FATTR4_WORD0_SIZE) if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0) return -NFSERR_BAD_XDR; + if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) { + fattr4_time_deleg_access access; + + if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access)) + return -NFSERR_BAD_XDR; + fattr->ncf_cb_atime.tv_sec = access.seconds; + fattr->ncf_cb_atime.tv_nsec = access.nseconds; + + } + if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) { + fattr4_time_deleg_modify modify; + + if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify)) + return -NFSERR_BAD_XDR; + fattr->ncf_cb_mtime.tv_sec = modify.seconds; + fattr->ncf_cb_mtime.tv_nsec = modify.nseconds; + + } return 0; } @@ -361,16 +385,24 @@ static void encode_cb_getattr4args(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr, struct nfs4_cb_fattr *fattr) { - struct nfs4_delegation *dp = - container_of(fattr, struct nfs4_delegation, dl_cb_fattr); + struct nfs4_delegation *dp = container_of(fattr, struct nfs4_delegation, dl_cb_fattr); struct knfsd_fh *fh = &dp->dl_stid.sc_file->fi_fhandle; - u32 bmap[1]; - - bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE; - + struct nfs4_cb_fattr 
*ncf = &dp->dl_cb_fattr; + u32 bmap_size = 1; + u32 bmap[3]; + + bmap[0] = FATTR4_WORD0_SIZE; + if (!ncf->ncf_file_modified) + bmap[0] |= FATTR4_WORD0_CHANGE; + + if (deleg_attrs_deleg(dp->dl_type)) { + bmap[1] = 0; + bmap[2] = FATTR4_WORD2_TIME_DELEG_ACCESS | FATTR4_WORD2_TIME_DELEG_MODIFY; + bmap_size = 3; + } encode_nfs_cb_opnum4(xdr, OP_CB_GETATTR); encode_nfs_fh4(xdr, fh); - encode_bitmap4(xdr, bmap, ARRAY_SIZE(bmap)); + encode_bitmap4(xdr, bmap, bmap_size); hdr->nops++; } @@ -634,7 +666,7 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, struct nfs4_cb_compound_hdr hdr; int status; u32 bitmap[3] = {0}; - u32 attrlen; + u32 attrlen, maxlen; struct nfs4_cb_fattr *ncf = container_of(cb, struct nfs4_cb_fattr, ncf_getattr); @@ -647,13 +679,17 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, return status; status = decode_cb_op_status(xdr, OP_CB_GETATTR, &cb->cb_status); - if (status) + if (unlikely(status || cb->cb_status)) return status; if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0) return -NFSERR_BAD_XDR; if (xdr_stream_decode_u32(xdr, &attrlen) < 0) return -NFSERR_BAD_XDR; - if (attrlen > (sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize))) + maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize); + if (bitmap[2] != 0) + maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) + + sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2; + if (attrlen > maxlen) return -NFSERR_BAD_XDR; status = decode_cb_fattr4(xdr, bitmap, ncf); return status; @@ -1036,8 +1072,7 @@ static void nfsd41_cb_inflight_begin(struct nfs4_client *clp) static void nfsd41_cb_inflight_end(struct nfs4_client *clp) { - if (atomic_dec_and_test(&clp->cl_cb_inflight)) - wake_up_var(&clp->cl_cb_inflight); + atomic_dec_and_wake_up(&clp->cl_cb_inflight); } static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp) @@ -1397,8 +1432,9 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) return; if (cb->cb_status) { - WARN_ONCE(task->tk_status, "cb_status=%d tk_status=%d", - cb->cb_status, task->tk_status); + WARN_ONCE(task->tk_status, + "cb_status=%d tk_status=%d cb_opcode=%d", + cb->cb_status, task->tk_status, cb->cb_ops->opcode); task->tk_status = cb->cb_status; } @@ -1547,8 +1583,11 @@ nfsd4_run_cb_work(struct work_struct *work) nfsd4_process_cb_update(cb); clnt = clp->cl_cb_client; - if (!clnt) { - /* Callback channel broken, or client killed; give up: */ + if (!clnt || clp->cl_state == NFSD4_COURTESY) { + /* + * Callback channel broken, client killed or + * nfs4_client in courtesy state; give up. 
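For context on the CB_GETATTR bitmap sizing above (bmap_size growing to 3): an NFSv4 attribute number n is encoded in bitmap4 word n / 32, bit n % 32, so any FATTR4_WORD2_* attribute lives in word 2 and forces a three-word bitmap. A minimal restatement of that mapping (the helper name is illustrative):

static inline void nfs4_bitmap_set(u32 bitmap[3], u32 attr)
{
	bitmap[attr / 32] |= 1U << (attr % 32);	/* word-2 attributes have attr >= 64 */
}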
+ */ nfsd41_destroy_cb(cb); return; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index ad44ad49274f..f6e06c779d09 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -1135,18 +1135,43 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, .na_iattr = &setattr->sa_iattr, .na_seclabel = &setattr->sa_label, }; + bool save_no_wcc, deleg_attrs; + struct nfs4_stid *st = NULL; struct inode *inode; __be32 status = nfs_ok; - bool save_no_wcc; int err; - if (setattr->sa_iattr.ia_valid & ATTR_SIZE) { + deleg_attrs = setattr->sa_bmval[2] & (FATTR4_WORD2_TIME_DELEG_ACCESS | + FATTR4_WORD2_TIME_DELEG_MODIFY); + + if (deleg_attrs || (setattr->sa_iattr.ia_valid & ATTR_SIZE)) { + int flags = WR_STATE; + + if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) + flags |= RD_STATE; + status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh, &setattr->sa_stateid, - WR_STATE, NULL, NULL); + flags, NULL, &st); if (status) return status; } + + if (deleg_attrs) { + status = nfserr_bad_stateid; + if (st->sc_type & SC_TYPE_DELEG) { + struct nfs4_delegation *dp = delegstateid(st); + + /* Only for *_ATTRS_DELEG flavors */ + if (deleg_attrs_deleg(dp->dl_type)) + status = nfs_ok; + } + } + if (st) + nfs4_put_stid(st); + if (status) + return status; + err = fh_want_write(&cstate->current_fh); if (err) return nfserrno(err); diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 4a765555bf84..28f4d5311c40 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -82,14 +82,13 @@ nfs4_save_creds(const struct cred **original_creds) new->fsuid = GLOBAL_ROOT_UID; new->fsgid = GLOBAL_ROOT_GID; *original_creds = override_creds(new); - put_cred(new); return 0; } static void nfs4_reset_creds(const struct cred *original) { - revert_creds(original); + put_cred(revert_creds(original)); } static void @@ -2052,7 +2051,6 @@ static inline int check_for_legacy_methods(int status, struct net *net) path_put(&path); if (status) return -ENOTDIR; - status = nn->client_tracking_ops->init(net); } return status; } diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 741b9449f727..153eeea2c7c9 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1909,17 +1909,86 @@ gen_sessionid(struct nfsd4_session *ses) */ #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) +static struct shrinker *nfsd_slot_shrinker; +static DEFINE_SPINLOCK(nfsd_session_list_lock); +static LIST_HEAD(nfsd_session_list); +/* The sum of "target_slots-1" on every session. The shrinker can push this + * down, though it can take a little while for the memory to actually + * be freed. The "-1" is because we can never free slot 0 while the + * session is active. + */ +static atomic_t nfsd_total_target_slots = ATOMIC_INIT(0); + static void -free_session_slots(struct nfsd4_session *ses) +free_session_slots(struct nfsd4_session *ses, int from) { int i; - for (i = 0; i < ses->se_fchannel.maxreqs; i++) { - free_svc_cred(&ses->se_slots[i]->sl_cred); - kfree(ses->se_slots[i]); + if (from >= ses->se_fchannel.maxreqs) + return; + + for (i = from; i < ses->se_fchannel.maxreqs; i++) { + struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); + + /* + * Save the seqid in case we reactivate this slot. 
+ * This will never require a memory allocation so GFP + * flag is irrelevant + */ + xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0); + free_svc_cred(&slot->sl_cred); + kfree(slot); + } + ses->se_fchannel.maxreqs = from; + if (ses->se_target_maxslots > from) { + int new_target = from ?: 1; + atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots); + ses->se_target_maxslots = new_target; } } +/** + * reduce_session_slots - reduce the target max-slots of a session if possible + * @ses: The session to affect + * @dec: how much to decrease the target by + * + * This interface can be used by a shrinker to reduce the target max-slots + * for a session so that some slots can eventually be freed. + * It uses spin_trylock() as it may be called in a context where another + * spinlock is held that has a dependency on client_lock. As shrinkers are + * best-effort, skipping a session if client_lock is already held has no + * great cost. + * + * Return value: + * The number of slots that the target was reduced by. + */ +static int +reduce_session_slots(struct nfsd4_session *ses, int dec) +{ + struct nfsd_net *nn = net_generic(ses->se_client->net, + nfsd_net_id); + int ret = 0; + + if (ses->se_target_maxslots <= 1) + return ret; + if (!spin_trylock(&nn->client_lock)) + return ret; + ret = min(dec, ses->se_target_maxslots-1); + ses->se_target_maxslots -= ret; + atomic_sub(ret, &nfsd_total_target_slots); + ses->se_slot_gen += 1; + if (ses->se_slot_gen == 0) { + int i; + ses->se_slot_gen = 1; + for (i = 0; i < ses->se_fchannel.maxreqs; i++) { + struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); + slot->sl_generation = 0; + } + } + spin_unlock(&nn->client_lock); + return ret; +} + /* * We don't actually need to cache the rpc and session headers, so we * can allocate a little less for each slot: @@ -1935,89 +2004,46 @@ static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) return size + sizeof(struct nfsd4_slot); } -/* - * XXX: If we run out of reserved DRC memory we could (up to a point) - * re-negotiate active sessions and reduce their slot usage to make - * room for new connections. For now we just fail the create session. - */ -static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) -{ - u32 slotsize = slot_bytes(ca); - u32 num = ca->maxreqs; - unsigned long avail, total_avail; - unsigned int scale_factor; - - spin_lock(&nfsd_drc_lock); - if (nfsd_drc_max_mem > nfsd_drc_mem_used) - total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used; - else - /* We have handed out more space than we chose in - * set_max_drc() to allow. That isn't really a - * problem as long as that doesn't make us think we - * have lots more due to integer overflow. - */ - total_avail = 0; - avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail); - /* - * Never use more than a fraction of the remaining memory, - * unless it's the only way to give this client a slot. - * The chosen fraction is either 1/8 or 1/number of threads, - * whichever is smaller. This ensures there are adequate - * slots to support multiple clients per thread. - * Give the client one slot even if that would require - * over-allocation--it is better than failure.
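The xa_mk_value() call above stores the slot's last seqid directly in the xarray as a tagged integer, so a later reactivation of the slot can resume the sequence without keeping the full nfsd4_slot around. A generic sketch of that xarray value-entry usage (helper names are illustrative):

static void stash_u32(struct xarray *xa, unsigned long index, u32 val)
{
	/* Replacing an existing entry with a value entry does not allocate. */
	xa_store(xa, index, xa_mk_value(val), GFP_KERNEL);
}

static u32 recall_u32(struct xarray *xa, unsigned long index)
{
	void *entry = xa_load(xa, index);

	return xa_is_value(entry) ? (u32)xa_to_value(entry) : 0;
}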
- */ - scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads); - - avail = clamp_t(unsigned long, avail, slotsize, - total_avail/scale_factor); - num = min_t(int, num, avail / slotsize); - num = max_t(int, num, 1); - nfsd_drc_mem_used += num * slotsize; - spin_unlock(&nfsd_drc_lock); - - return num; -} - -static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) -{ - int slotsize = slot_bytes(ca); - - spin_lock(&nfsd_drc_lock); - nfsd_drc_mem_used -= slotsize * ca->maxreqs; - spin_unlock(&nfsd_drc_lock); -} - static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, struct nfsd4_channel_attrs *battrs) { int numslots = fattrs->maxreqs; int slotsize = slot_bytes(fattrs); struct nfsd4_session *new; + struct nfsd4_slot *slot; int i; - BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION) - > PAGE_SIZE); - - new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL); + new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) return NULL; + xa_init(&new->se_slots); /* allocate each struct nfsd4_slot and data cache in one piece */ - for (i = 0; i < numslots; i++) { - new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); - if (!new->se_slots[i]) - goto out_free; - } + slot = kzalloc(slotsize, GFP_KERNEL); + if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL))) + goto out_free; + for (i = 1; i < numslots; i++) { + const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; + slot = kzalloc(slotsize, gfp); + if (!slot) + break; + if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) { + kfree(slot); + break; + } + } + fattrs->maxreqs = i; memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); + new->se_target_maxslots = i; + atomic_add(i - 1, &nfsd_total_target_slots); new->se_cb_slot_avail = ~0U; new->se_cb_highest_slot = min(battrs->maxreqs - 1, NFSD_BC_SLOT_TABLE_SIZE - 1); spin_lock_init(&new->se_lock); return new; out_free: - while (i--) - kfree(new->se_slots[i]); + kfree(slot); + xa_destroy(&new->se_slots); kfree(new); return NULL; } @@ -2123,17 +2149,47 @@ static void nfsd4_del_conns(struct nfsd4_session *s) static void __free_session(struct nfsd4_session *ses) { - free_session_slots(ses); + free_session_slots(ses, 0); + xa_destroy(&ses->se_slots); kfree(ses); } static void free_session(struct nfsd4_session *ses) { nfsd4_del_conns(ses); - nfsd4_put_drc_mem(&ses->se_fchannel); __free_session(ses); } +static unsigned long +nfsd_slot_count(struct shrinker *s, struct shrink_control *sc) +{ + unsigned long cnt = atomic_read(&nfsd_total_target_slots); + + return cnt ? 
cnt : SHRINK_EMPTY; +} + +static unsigned long +nfsd_slot_scan(struct shrinker *s, struct shrink_control *sc) +{ + struct nfsd4_session *ses; + unsigned long scanned = 0; + unsigned long freed = 0; + + spin_lock(&nfsd_session_list_lock); + list_for_each_entry(ses, &nfsd_session_list, se_all_sessions) { + freed += reduce_session_slots(ses, 1); + scanned += 1; + if (scanned >= sc->nr_to_scan) { + /* Move starting point for next scan */ + list_move(&nfsd_session_list, &ses->se_all_sessions); + break; + } + } + spin_unlock(&nfsd_session_list_lock); + sc->nr_scanned = scanned; + return freed; +} + static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) { int idx; @@ -2158,6 +2214,10 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru list_add(&new->se_perclnt, &clp->cl_sessions); spin_unlock(&clp->cl_lock); + spin_lock(&nfsd_session_list_lock); + list_add_tail(&new->se_all_sessions, &nfsd_session_list); + spin_unlock(&nfsd_session_list_lock); + { struct sockaddr *sa = svc_addr(rqstp); /* @@ -2227,6 +2287,9 @@ unhash_session(struct nfsd4_session *ses) spin_lock(&ses->se_client->cl_lock); list_del(&ses->se_perclnt); spin_unlock(&ses->se_client->cl_lock); + spin_lock(&nfsd_session_list_lock); + list_del(&ses->se_all_sessions); + spin_unlock(&nfsd_session_list_lock); } /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ @@ -2362,8 +2425,12 @@ unhash_client_locked(struct nfs4_client *clp) } list_del_init(&clp->cl_lru); spin_lock(&clp->cl_lock); - list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) + spin_lock(&nfsd_session_list_lock); + list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) { list_del_init(&ses->se_hash); + list_del_init(&ses->se_all_sessions); + } + spin_unlock(&nfsd_session_list_lock); spin_unlock(&clp->cl_lock); } @@ -2685,6 +2752,7 @@ static const char *cb_state2str(int state) static int client_info_show(struct seq_file *m, void *v) { struct inode *inode = file_inode(m->file); + struct nfsd4_session *ses; struct nfs4_client *clp; u64 clid; @@ -2721,6 +2789,16 @@ static int client_info_show(struct seq_file *m, void *v) seq_printf(m, "callback address: \"%pISpc\"\n", &clp->cl_cb_conn.cb_addr); seq_printf(m, "admin-revoked states: %d\n", atomic_read(&clp->cl_admin_revoked)); + spin_lock(&clp->cl_lock); + seq_printf(m, "session slots:"); + list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) + seq_printf(m, " %u", ses->se_fchannel.maxreqs); + seq_printf(m, "\nsession target slots:"); + list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) + seq_printf(m, " %u", ses->se_target_maxslots); + spin_unlock(&clp->cl_lock); + seq_puts(m, "\n"); + drop_client(clp); return 0; @@ -2873,6 +2951,21 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) return 0; } +static char *nfs4_show_deleg_type(u32 dl_type) +{ + switch (dl_type) { + case OPEN_DELEGATE_READ: + return "r"; + case OPEN_DELEGATE_WRITE: + return "w"; + case OPEN_DELEGATE_READ_ATTRS_DELEG: + return "ra"; + case OPEN_DELEGATE_WRITE_ATTRS_DELEG: + return "wa"; + } + return "?"; +} + static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) { struct nfs4_delegation *ds; @@ -2886,8 +2979,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) nfs4_show_stateid(s, &st->sc_stateid); seq_puts(s, ": { type: deleg, "); - seq_printf(s, "access: %s", - ds->dl_type == NFS4_OPEN_DELEGATE_READ ? 
"r" : "w"); + seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type)); /* XXX: lease time, whether it's being recalled. */ @@ -3708,10 +3800,10 @@ nfsd4_exchange_id_release(union nfsd4_op_u *u) kfree(exid->server_impl_name); } -static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse) +static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, u8 flags) { /* The slot is in use, and no response has been sent. */ - if (slot_inuse) { + if (flags & NFSD4_SLOT_INUSE) { if (seqid == slot_seqid) return nfserr_jukebox; else @@ -3720,6 +3812,8 @@ static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, bool slot_inuse) /* Note unsigned 32-bit arithmetic handles wraparound: */ if (likely(seqid == slot_seqid + 1)) return nfs_ok; + if ((flags & NFSD4_SLOT_REUSED) && seqid == 1) + return nfs_ok; if (seqid == slot_seqid) return nfserr_replay_cache; return nfserr_seq_misordered; @@ -3778,17 +3872,6 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs ca->maxresp_cached = min_t(u32, ca->maxresp_cached, NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); - /* - * Note decreasing slot size below client's request may make it - * difficult for client to function correctly, whereas - * decreasing the number of slots will (just?) affect - * performance. When short on memory we therefore prefer to - * decrease number of slots instead of their size. Clients that - * request larger slots than they need will get poor results: - * Note that we always allow at least one slot, because our - * accounting is soft and provides no guarantees either way. - */ - ca->maxreqs = nfsd4_get_drc_mem(ca, nn); return nfs_ok; } @@ -3866,11 +3949,11 @@ nfsd4_create_session(struct svc_rqst *rqstp, return status; status = check_backchannel_attrs(&cr_ses->back_channel); if (status) - goto out_release_drc_mem; + goto out_err; status = nfserr_jukebox; new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); if (!new) - goto out_release_drc_mem; + goto out_err; conn = alloc_conn_from_crses(rqstp, cr_ses); if (!conn) goto out_free_session; @@ -3979,8 +4062,7 @@ out_free_conn: free_conn(conn); out_free_session: __free_session(new); -out_release_drc_mem: - nfsd4_put_drc_mem(&cr_ses->fore_channel); +out_err: return status; } @@ -4278,17 +4360,11 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (seq->slotid >= session->se_fchannel.maxreqs) goto out_put_session; - slot = session->se_slots[seq->slotid]; + slot = xa_load(&session->se_slots, seq->slotid); dprintk("%s: slotid %d\n", __func__, seq->slotid); - /* We do not negotiate the number of slots yet, so set the - * maxslots to the session maxreqs which is used to encode - * sr_highest_slotid and the sr_target_slot id to maxslots */ - seq->maxslots = session->se_fchannel.maxreqs; - trace_nfsd_slot_seqid_sequence(clp, seq, slot); - status = check_slot_seqid(seq->seqid, slot->sl_seqid, - slot->sl_flags & NFSD4_SLOT_INUSE); + status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags); if (status == nfserr_replay_cache) { status = nfserr_seq_misordered; if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) @@ -4313,6 +4389,12 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, if (status) goto out_put_session; + if (session->se_target_maxslots < session->se_fchannel.maxreqs && + slot->sl_generation == session->se_slot_gen && + seq->maxslots <= session->se_target_maxslots) + /* Client acknowledged our reduce 
maxreqs */ + free_session_slots(session, session->se_target_maxslots); + buflen = (seq->cachethis) ? session->se_fchannel.maxresp_cached : session->se_fchannel.maxresp_sz; @@ -4323,9 +4405,11 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, svc_reserve(rqstp, buflen); status = nfs_ok; - /* Success! bump slot seqid */ + /* Success! accept new slot seqid */ slot->sl_seqid = seq->seqid; + slot->sl_flags &= ~NFSD4_SLOT_REUSED; slot->sl_flags |= NFSD4_SLOT_INUSE; + slot->sl_generation = session->se_slot_gen; if (seq->cachethis) slot->sl_flags |= NFSD4_SLOT_CACHETHIS; else @@ -4335,7 +4419,51 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, cstate->session = session; cstate->clp = clp; + /* + * If the client ever uses the highest available slot, + * gently try to allocate another 20%. This allows + * fairly quick growth without grossly over-shooting what + * the client might use. + */ + if (seq->slotid == session->se_fchannel.maxreqs - 1 && + session->se_target_maxslots >= session->se_fchannel.maxreqs && + session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) { + int s = session->se_fchannel.maxreqs; + int cnt = DIV_ROUND_UP(s, 5); + void *prev_slot; + + do { + /* + * GFP_NOWAIT both allows allocation under a + * spinlock, and only succeeds if there is + * plenty of memory. + */ + slot = kzalloc(slot_bytes(&session->se_fchannel), + GFP_NOWAIT); + prev_slot = xa_load(&session->se_slots, s); + if (xa_is_value(prev_slot) && slot) { + slot->sl_seqid = xa_to_value(prev_slot); + slot->sl_flags |= NFSD4_SLOT_REUSED; + } + if (slot && + !xa_is_err(xa_store(&session->se_slots, s, slot, + GFP_NOWAIT))) { + s += 1; + session->se_fchannel.maxreqs = s; + atomic_add(s - session->se_target_maxslots, + &nfsd_total_target_slots); + session->se_target_maxslots = s; + } else { + kfree(slot); + slot = NULL; + } + } while (slot && --cnt > 0); + } + out: + seq->maxslots = max(session->se_target_maxslots, seq->maxslots); + seq->target_maxslots = session->se_target_maxslots; + switch (clp->cl_cb_state) { case NFSD4_CB_DOWN: seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; @@ -4739,7 +4867,7 @@ static void init_nfs4_replay(struct nfs4_replay *rp) rp->rp_status = nfserr_serverfault; rp->rp_buflen = 0; rp->rp_buf = rp->rp_ibuf; - atomic_set(&rp->rp_locked, RP_UNLOCKED); + rp->rp_locked = RP_UNLOCKED; } static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, @@ -4747,9 +4875,9 @@ static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, { if (!nfsd4_has_session(cstate)) { wait_var_event(&so->so_replay.rp_locked, - atomic_cmpxchg(&so->so_replay.rp_locked, - RP_UNLOCKED, RP_LOCKED) != RP_LOCKED); - if (atomic_read(&so->so_replay.rp_locked) == RP_UNHASHED) + cmpxchg(&so->so_replay.rp_locked, + RP_UNLOCKED, RP_LOCKED) != RP_LOCKED); + if (so->so_replay.rp_locked == RP_UNHASHED) return -EAGAIN; cstate->replay_owner = nfs4_get_stateowner(so); } @@ -4762,9 +4890,7 @@ void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) if (so != NULL) { cstate->replay_owner = NULL; - atomic_set(&so->so_replay.rp_locked, RP_UNLOCKED); - smp_mb__after_atomic(); - wake_up_var(&so->so_replay.rp_locked); + store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED); nfs4_put_stateowner(so); } } @@ -5069,9 +5195,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) * Some threads with a reference might be waiting for rp_locked, * so tell them to stop waiting. 
*/ - atomic_set(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED); - smp_mb__after_atomic(); - wake_up_var(&oo->oo_owner.so_replay.rp_locked); + store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED); wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); release_all_access(s); @@ -5472,7 +5596,7 @@ retry: static inline __be32 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) { - if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ)) + if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type)) return nfserr_openmode; else return nfs_ok; @@ -5704,8 +5828,7 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp) return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; } -static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp, - int flag) +static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp) { struct file_lease *fl; @@ -5714,7 +5837,7 @@ static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp, return NULL; fl->fl_lmops = &nfsd_lease_mng_ops; fl->c.flc_flags = FL_DELEG; - fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK; + fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK; fl->c.flc_owner = (fl_owner_t)dp; fl->c.flc_pid = current->tgid; fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; @@ -5829,13 +5952,14 @@ static struct nfs4_delegation * nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct svc_fh *parent) { - int status = 0; + bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; struct nfs4_client *clp = stp->st_stid.sc_client; struct nfs4_file *fp = stp->st_stid.sc_file; struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; struct nfs4_delegation *dp; struct nfsd_file *nf = NULL; struct file_lease *fl; + int status = 0; u32 dl_type; /* @@ -5860,7 +5984,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, */ if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) { nf = find_rw_file(fp); - dl_type = NFS4_OPEN_DELEGATE_WRITE; + dl_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE; } /* @@ -5869,7 +5993,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, */ if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { nf = find_readable_file(fp); - dl_type = NFS4_OPEN_DELEGATE_READ; + dl_type = deleg_ts ? 
OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ; } if (!nf) @@ -5901,7 +6025,7 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, if (!dp) goto out_delegees; - fl = nfs4_alloc_init_lease(dp, dl_type); + fl = nfs4_alloc_init_lease(dp); if (!fl) goto out_clnt_odstate; @@ -5958,20 +6082,20 @@ out_delegees: static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) { - open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; + open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; if (status == -EAGAIN) open->op_why_no_deleg = WND4_CONTENTION; else { open->op_why_no_deleg = WND4_RESOURCE; switch (open->op_deleg_want) { - case NFS4_SHARE_WANT_READ_DELEG: - case NFS4_SHARE_WANT_WRITE_DELEG: - case NFS4_SHARE_WANT_ANY_DELEG: + case OPEN4_SHARE_ACCESS_WANT_READ_DELEG: + case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG: + case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG: break; - case NFS4_SHARE_WANT_CANCEL: + case OPEN4_SHARE_ACCESS_WANT_CANCEL: open->op_why_no_deleg = WND4_CANCELLED; break; - case NFS4_SHARE_WANT_NO_DELEG: + case OPEN4_SHARE_ACCESS_WANT_NO_DELEG: WARN_ON_ONCE(1); } } @@ -6027,13 +6151,14 @@ static void nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct svc_fh *currentfh) { - struct nfs4_delegation *dp; + bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; struct nfs4_openowner *oo = openowner(stp->st_stateowner); struct nfs4_client *clp = stp->st_stid.sc_client; struct svc_fh *parent = NULL; - int cb_up; - int status = 0; + struct nfs4_delegation *dp; struct kstat stat; + int status = 0; + int cb_up; cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); open->op_recall = false; @@ -6074,20 +6199,22 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, destroy_delegation(dp); goto out_no_deleg; } - open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE; + open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : + OPEN_DELEGATE_WRITE; dp->dl_cb_fattr.ncf_cur_fsize = stat.size; dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat); trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); } else { - open->op_delegate_type = NFS4_OPEN_DELEGATE_READ; + open->op_delegate_type = deleg_ts ? 
OPEN_DELEGATE_READ_ATTRS_DELEG : + OPEN_DELEGATE_READ; trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); } nfs4_put_stid(&dp->dl_stid); return; out_no_deleg: - open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE; + open->op_delegate_type = OPEN_DELEGATE_NONE; if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && - open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) { + open->op_delegate_type != OPEN_DELEGATE_NONE) { dprintk("NFSD: WARNING: refusing delegation reclaim\n"); open->op_recall = true; } @@ -6101,21 +6228,32 @@ out_no_deleg: static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, struct nfs4_delegation *dp) { - if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG && - dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { - open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; - open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; - } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG && - dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) { - open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; - open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; + if (deleg_is_write(dp->dl_type)) { + if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) { + open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; + open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; + } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) { + open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; + open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; + } } /* Otherwise the client must be confused wanting a delegation * it already has, therefore we don't return - * NFS4_OPEN_DELEGATE_NONE_EXT and reason. + * OPEN_DELEGATE_NONE_EXT and reason. */ } +/* Are we returning only a delegation stateid? */ +static bool open_xor_delegation(struct nfsd4_open *open) +{ + if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION)) + return false; + /* Did we actually get a delegation? */ + if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type)) + return false; + return true; +} + /** * nfsd4_process_open2 - finish open processing * @rqstp: the RPC transaction being executed @@ -6201,8 +6339,8 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf mutex_unlock(&stp->st_mutex); if (nfsd4_has_session(&resp->cstate)) { - if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { - open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT; + if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) { + open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; open->op_why_no_deleg = WND4_NOT_WANTED; goto nodeleg; } @@ -6213,12 +6351,23 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf * OPEN succeeds even if we fail. */ nfs4_open_delegation(open, stp, &resp->cstate.current_fh); + + /* + * If there is an existing open stateid, it must be updated and + * returned. Only respect WANT_OPEN_XOR_DELEGATION when a new + * open stateid would have to be created. + */ + if (new_stp && open_xor_delegation(open)) { + memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid)); + open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID; + release_open_stateid(stp); + } nodeleg: status = nfs_ok; trace_nfsd_open(&stp->st_stid.sc_stateid); out: /* 4.1 client trying to upgrade/downgrade delegation? */ - if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp && + if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp && open->op_deleg_want) nfsd4_deleg_xgrade_none_ext(open, dp); @@ -6229,7 +6378,7 @@ out: /* * To finish the open response, we just need to set the rflags. 
*/ - open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX; + open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX; if (nfsd4_has_session(&resp->cstate)) open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) @@ -7966,7 +8115,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_blocked_lock *nbl = NULL; struct file_lock *file_lock = NULL; struct file_lock *conflock = NULL; - struct super_block *sb; __be32 status = 0; int lkflg; int err; @@ -7986,7 +8134,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); if (status != nfs_ok) return status; - sb = cstate->current_fh.fh_dentry->d_sb; if (lock->lk_is_new) { if (nfsd4_has_session(cstate)) @@ -8721,7 +8868,6 @@ skip_grace: } /* initialization to perform when the nfsd service is started: */ - int nfs4_state_start(void) { @@ -8731,6 +8877,15 @@ nfs4_state_start(void) if (ret) return ret; + nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot"); + if (!nfsd_slot_shrinker) { + rhltable_destroy(&nfs4_file_rhltable); + return -ENOMEM; + } + nfsd_slot_shrinker->count_objects = nfsd_slot_count; + nfsd_slot_shrinker->scan_objects = nfsd_slot_scan; + shrinker_register(nfsd_slot_shrinker); + set_max_delegations(); return 0; } @@ -8772,6 +8927,7 @@ void nfs4_state_shutdown(void) { rhltable_destroy(&nfs4_file_rhltable); + shrinker_free(nfsd_slot_shrinker); } static void @@ -8889,6 +9045,78 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, } /** + * set_cb_time - vet and set the timespec for a cb_getattr update + * @cb: timestamp from the CB_GETATTR response + * @orig: original timestamp in the inode + * @now: current time + * + * Given a timestamp in a CB_GETATTR response, check it against the + * current timestamp in the inode and the current time. Returns true + * if the inode's timestamp needs to be updated, and false otherwise. + * @cb may also be changed if the timestamp needs to be clamped. + */ +static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig, + const struct timespec64 *now) +{ + + /* + * "When the time presented is before the original time, then the + * update is ignored." Also no need to update if there is no change. + */ + if (timespec64_compare(cb, orig) <= 0) + return false; + + /* + * "When the time presented is in the future, the server can either + * clamp the new time to the current time, or it may + * return NFS4ERR_DELAY to the client, allowing it to retry." 
+ */ + if (timespec64_compare(cb, now) > 0) { + /* clamp it */ + *cb = *now; + } + + return true; +} + +static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp) +{ + struct inode *inode = d_inode(dentry); + struct timespec64 now = current_time(inode); + struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr; + struct iattr attrs = { }; + int ret; + + if (deleg_attrs_deleg(dp->dl_type)) { + struct timespec64 atime = inode_get_atime(inode); + struct timespec64 mtime = inode_get_mtime(inode); + + attrs.ia_atime = ncf->ncf_cb_atime; + attrs.ia_mtime = ncf->ncf_cb_mtime; + + if (set_cb_time(&attrs.ia_atime, &atime, &now)) + attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; + + if (set_cb_time(&attrs.ia_mtime, &mtime, &now)) { + attrs.ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET; + attrs.ia_ctime = attrs.ia_mtime; + } + } else { + attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME; + attrs.ia_mtime = attrs.ia_ctime = now; + } + + if (!attrs.ia_valid) + return 0; + + attrs.ia_valid |= ATTR_DELEG; + inode_lock(inode); + ret = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); + inode_unlock(inode); + return ret; +} + +/** * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict * @rqstp: RPC transaction context * @dentry: dentry of inode to be checked for a conflict @@ -8914,7 +9142,6 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, struct file_lock_context *ctx; struct nfs4_delegation *dp = NULL; struct file_lease *fl; - struct iattr attrs; struct nfs4_cb_fattr *ncf; struct inode *inode = d_inode(dentry); @@ -8976,11 +9203,7 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, * not update the file's metadata with the client's * modified size */ - attrs.ia_mtime = attrs.ia_ctime = current_time(inode); - attrs.ia_valid = ATTR_MTIME | ATTR_CTIME | ATTR_DELEG; - inode_lock(inode); - err = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); - inode_unlock(inode); + err = cb_getattr_update_times(dentry, dp); if (err) { status = nfserrno(err); goto out_status; diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 53fac037611c..e67420729ecd 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -55,6 +55,7 @@ #include "netns.h" #include "pnfs.h" #include "filecache.h" +#include "nfs4xdr_gen.h" #include "trace.h" @@ -520,6 +521,26 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen, *umask = mask & S_IRWXUGO; iattr->ia_valid |= ATTR_MODE; } + if (bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) { + fattr4_time_deleg_access access; + + if (!xdrgen_decode_fattr4_time_deleg_access(argp->xdr, &access)) + return nfserr_bad_xdr; + iattr->ia_atime.tv_sec = access.seconds; + iattr->ia_atime.tv_nsec = access.nseconds; + iattr->ia_valid |= ATTR_ATIME | ATTR_ATIME_SET | ATTR_DELEG; + } + if (bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) { + fattr4_time_deleg_modify modify; + + if (!xdrgen_decode_fattr4_time_deleg_modify(argp->xdr, &modify)) + return nfserr_bad_xdr; + iattr->ia_mtime.tv_sec = modify.seconds; + iattr->ia_mtime.tv_nsec = modify.nseconds; + iattr->ia_ctime.tv_sec = modify.seconds; + iattr->ia_ctime.tv_nsec = modify.seconds; + iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG; + } /* request sanity: did attrlist4 contain the expected number of words? 
*/ if (attrlist4_count != xdr_stream_pos(argp->xdr) - starting_pos) @@ -1066,13 +1087,13 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *sh return nfs_ok; if (!argp->minorversion) return nfserr_bad_xdr; - switch (w & NFS4_SHARE_WANT_MASK) { - case NFS4_SHARE_WANT_NO_PREFERENCE: - case NFS4_SHARE_WANT_READ_DELEG: - case NFS4_SHARE_WANT_WRITE_DELEG: - case NFS4_SHARE_WANT_ANY_DELEG: - case NFS4_SHARE_WANT_NO_DELEG: - case NFS4_SHARE_WANT_CANCEL: + switch (w & NFS4_SHARE_WANT_TYPE_MASK) { + case OPEN4_SHARE_ACCESS_WANT_NO_PREFERENCE: + case OPEN4_SHARE_ACCESS_WANT_READ_DELEG: + case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG: + case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG: + case OPEN4_SHARE_ACCESS_WANT_NO_DELEG: + case OPEN4_SHARE_ACCESS_WANT_CANCEL: break; default: return nfserr_bad_xdr; @@ -1884,7 +1905,8 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp, return nfserr_bad_xdr; seq->seqid = be32_to_cpup(p++); seq->slotid = be32_to_cpup(p++); - seq->maxslots = be32_to_cpup(p++); + /* sa_highest_slotid counts from 0 but maxslots counts from 1 ... */ + seq->maxslots = be32_to_cpup(p++) + 1; seq->cachethis = be32_to_cpup(p); seq->status_flags = 0; @@ -2818,11 +2840,11 @@ static __be32 nfsd4_encode_nfsace4(struct xdr_stream *xdr, struct svc_rqst *rqst #ifdef CONFIG_NFSD_V4_SECURITY_LABEL static inline __be32 nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, - void *context, int len) + const struct lsm_context *context) { __be32 *p; - p = xdr_reserve_space(xdr, len + 4 + 4 + 4); + p = xdr_reserve_space(xdr, context->len + 4 + 4 + 4); if (!p) return nfserr_resource; @@ -2832,13 +2854,13 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, */ *p++ = cpu_to_be32(0); /* lfs */ *p++ = cpu_to_be32(0); /* pi */ - p = xdr_encode_opaque(p, context, len); + p = xdr_encode_opaque(p, context->context, context->len); return 0; } #else static inline __be32 nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp, - void *context, int len) + struct lsm_context *context) { return 0; } #endif @@ -2919,9 +2941,9 @@ struct nfsd4_fattr_args { struct kstat stat; struct kstatfs statfs; struct nfs4_acl *acl; + u64 change_attr; #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - void *context; - int contextlen; + struct lsm_context context; #endif u32 rdattr_err; bool contextsupport; @@ -3018,7 +3040,6 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr, const struct nfsd4_fattr_args *args) { const struct svc_export *exp = args->exp; - u64 c; if (unlikely(exp->ex_flags & NFSEXP_V4ROOT)) { u32 flush_time = convert_to_wallclock(exp->cd->flush_time); @@ -3029,9 +3050,7 @@ static __be32 nfsd4_encode_fattr4_change(struct xdr_stream *xdr, return nfserr_resource; return nfs_ok; } - - c = nfsd4_change_attribute(&args->stat); - return nfsd4_encode_changeid4(xdr, c); + return nfsd4_encode_changeid4(xdr, args->change_attr); } static __be32 nfsd4_encode_fattr4_size(struct xdr_stream *xdr, @@ -3376,8 +3395,7 @@ static __be32 nfsd4_encode_fattr4_suppattr_exclcreat(struct xdr_stream *xdr, static __be32 nfsd4_encode_fattr4_sec_label(struct xdr_stream *xdr, const struct nfsd4_fattr_args *args) { - return nfsd4_encode_security_label(xdr, args->rqstp, - args->context, args->contextlen); + return nfsd4_encode_security_label(xdr, args->rqstp, &args->context); } #endif @@ -3389,6 +3407,56 @@ static __be32 nfsd4_encode_fattr4_xattr_support(struct xdr_stream *xdr, return nfsd4_encode_bool(xdr, err == 0); } +#define NFSD_OA_SHARE_ACCESS 
(BIT(OPEN_ARGS_SHARE_ACCESS_READ) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_WRITE) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_BOTH)) + +#define NFSD_OA_SHARE_DENY (BIT(OPEN_ARGS_SHARE_DENY_NONE) | \ + BIT(OPEN_ARGS_SHARE_DENY_READ) | \ + BIT(OPEN_ARGS_SHARE_DENY_WRITE) | \ + BIT(OPEN_ARGS_SHARE_DENY_BOTH)) + +#define NFSD_OA_SHARE_ACCESS_WANT (BIT(OPEN_ARGS_SHARE_ACCESS_WANT_ANY_DELEG) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_WANT_NO_DELEG) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_WANT_CANCEL) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS) | \ + BIT(OPEN_ARGS_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION)) + +#define NFSD_OA_OPEN_CLAIM (BIT(OPEN_ARGS_OPEN_CLAIM_NULL) | \ + BIT(OPEN_ARGS_OPEN_CLAIM_PREVIOUS) | \ + BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_CUR) | \ + BIT(OPEN_ARGS_OPEN_CLAIM_DELEGATE_PREV)| \ + BIT(OPEN_ARGS_OPEN_CLAIM_FH) | \ + BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_CUR_FH) | \ + BIT(OPEN_ARGS_OPEN_CLAIM_DELEG_PREV_FH)) + +#define NFSD_OA_CREATE_MODE (BIT(OPEN_ARGS_CREATEMODE_UNCHECKED4) | \ + BIT(OPEN_ARGS_CREATE_MODE_GUARDED) | \ + BIT(OPEN_ARGS_CREATEMODE_EXCLUSIVE4) | \ + BIT(OPEN_ARGS_CREATE_MODE_EXCLUSIVE4_1)) + +static uint32_t oa_share_access = NFSD_OA_SHARE_ACCESS; +static uint32_t oa_share_deny = NFSD_OA_SHARE_DENY; +static uint32_t oa_share_access_want = NFSD_OA_SHARE_ACCESS_WANT; +static uint32_t oa_open_claim = NFSD_OA_OPEN_CLAIM; +static uint32_t oa_create_mode = NFSD_OA_CREATE_MODE; + +static const struct open_arguments4 nfsd_open_arguments = { + .oa_share_access = { .count = 1, .element = &oa_share_access }, + .oa_share_deny = { .count = 1, .element = &oa_share_deny }, + .oa_share_access_want = { .count = 1, .element = &oa_share_access_want }, + .oa_open_claim = { .count = 1, .element = &oa_open_claim }, + .oa_create_mode = { .count = 1, .element = &oa_create_mode }, +}; + +static __be32 nfsd4_encode_fattr4_open_arguments(struct xdr_stream *xdr, + const struct nfsd4_fattr_args *args) +{ + if (!xdrgen_encode_fattr4_open_arguments(xdr, &nfsd_open_arguments)) + return nfserr_resource; + return nfs_ok; +} + static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = { [FATTR4_SUPPORTED_ATTRS] = nfsd4_encode_fattr4_supported_attrs, [FATTR4_TYPE] = nfsd4_encode_fattr4_type, @@ -3489,6 +3557,7 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = { [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop, [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support, + [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments, }; /* @@ -3506,8 +3575,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct nfsd4_fattr_args args; struct svc_fh *tempfh = NULL; int starting_len = xdr->buf->len; - __be32 *attrlen_p, status; - int attrlen_offset; + unsigned int attrlen_offset; + __be32 attrlen, status; u32 attrmask[3]; int err; struct nfsd4_compoundres *resp = rqstp->rq_resp; @@ -3527,7 +3596,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, args.ignore_crossmnt = (ignore_crossmnt != 0); args.acl = NULL; #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - args.context = NULL; + args.context.context = NULL; #endif /* @@ -3544,7 +3613,11 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, if (status) goto out; } - if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) { + if ((attrmask[0] & (FATTR4_WORD0_CHANGE | + FATTR4_WORD0_SIZE)) || + (attrmask[1] & (FATTR4_WORD1_TIME_ACCESS | + FATTR4_WORD1_TIME_MODIFY | + FATTR4_WORD1_TIME_METADATA))) { status = nfsd4_deleg_getattr_conflict(rqstp, dentry, &dp); if (status) goto out; @@ -3556,11 +3629,22 @@ 
nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, if (dp) { struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr; - if (ncf->ncf_file_modified) + if (ncf->ncf_file_modified) { + ++ncf->ncf_initial_cinfo; args.stat.size = ncf->ncf_cur_fsize; + if (!timespec64_is_epoch(&ncf->ncf_cb_mtime)) + args.stat.mtime = ncf->ncf_cb_mtime; + } + args.change_attr = ncf->ncf_initial_cinfo; + + if (!timespec64_is_epoch(&ncf->ncf_cb_atime)) + args.stat.atime = ncf->ncf_cb_atime; nfs4_put_stid(&dp->dl_stid); + } else { + args.change_attr = nfsd4_change_attribute(&args.stat); } + if (err) goto out_nfserr; @@ -3607,7 +3691,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { if (exp->ex_flags & NFSEXP_SECURITY_LABEL) err = security_inode_getsecctx(d_inode(dentry), - &args.context, &args.contextlen); + &args.context); else err = -EOPNOTSUPP; args.contextsupport = (err == 0); @@ -3628,8 +3712,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, /* attr_vals */ attrlen_offset = xdr->buf->len; - attrlen_p = xdr_reserve_space(xdr, XDR_UNIT); - if (!attrlen_p) + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT))) goto out_resource; bitmap_from_arr32(attr_bitmap, attrmask, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)); @@ -3639,13 +3722,14 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr, if (status != nfs_ok) goto out; } - *attrlen_p = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT); + attrlen = cpu_to_be32(xdr->buf->len - attrlen_offset - XDR_UNIT); + write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, XDR_UNIT); status = nfs_ok; out: #ifdef CONFIG_NFSD_V4_SECURITY_LABEL - if (args.context) - security_release_secctx(args.context, args.contextlen); + if (args.context.context) + security_release_secctx(&args.context); #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */ kfree(args.acl); if (tempfh) { @@ -4229,18 +4313,20 @@ nfsd4_encode_open_delegation4(struct xdr_stream *xdr, struct nfsd4_open *open) if (xdr_stream_encode_u32(xdr, open->op_delegate_type) != XDR_UNIT) return nfserr_resource; switch (open->op_delegate_type) { - case NFS4_OPEN_DELEGATE_NONE: + case OPEN_DELEGATE_NONE: status = nfs_ok; break; - case NFS4_OPEN_DELEGATE_READ: + case OPEN_DELEGATE_READ: + case OPEN_DELEGATE_READ_ATTRS_DELEG: /* read */ status = nfsd4_encode_open_read_delegation4(xdr, open); break; - case NFS4_OPEN_DELEGATE_WRITE: + case OPEN_DELEGATE_WRITE: + case OPEN_DELEGATE_WRITE_ATTRS_DELEG: /* write */ status = nfsd4_encode_open_write_delegation4(xdr, open); break; - case NFS4_OPEN_DELEGATE_NONE_EXT: + case OPEN_DELEGATE_NONE_EXT: /* od_whynone */ status = nfsd4_encode_open_none_delegation4(xdr, open); break; @@ -4317,6 +4403,15 @@ static __be32 nfsd4_encode_splice_read( __be32 nfserr; /* + * Splice read doesn't work if encoding has already wandered + * into the XDR buf's page array. + */ + if (unlikely(xdr->buf->page_len)) { + WARN_ON_ONCE(1); + return nfserr_serverfault; + } + + /* * Make sure there is room at the end of buf->head for * svcxdr_encode_opaque_pages() to create a tail buffer * to XDR-pad the payload. 
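/*
 * The hunks below rework several encoders (READ, READLINK, SECINFO,
 * READ_PLUS, per-op status) to stop caching the pointer returned by
 * xdr_reserve_space() across further encoding and instead remember the
 * offset into the xdr_buf, backfilling it later with
 * write_bytes_to_xdr_buf(). A minimal sketch of that reserve-then-backfill
 * pattern follows; the function name and length handling are illustrative
 * only, and it assumes the helpers already used in this file
 * (xdr_reserve_space, write_bytes_to_xdr_buf, nfserr_resource, nfs_ok).
 */
static __be32 encode_len_then_payload(struct xdr_stream *xdr,
				      unsigned int payload_len)
{
	unsigned int len_offset = xdr->buf->len;	/* where the length word will live */
	__be32 wire_len;

	/*
	 * Reserve one XDR word for the length, but do not keep the pointer:
	 * later encoding may spill into the page array or trigger
	 * xdr_commit_encode(), either of which can invalidate it.
	 */
	if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT)))
		return nfserr_resource;

	/* ... encode payload_len bytes of variable-length data here ... */

	/* Backfill the reserved word by offset, not through a stale pointer. */
	wire_len = cpu_to_be32(payload_len);
	write_bytes_to_xdr_buf(xdr->buf, len_offset, &wire_len, XDR_UNIT);
	return nfs_ok;
}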
@@ -4398,25 +4493,23 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_compoundargs *argp = resp->rqstp->rq_argp; struct nfsd4_read *read = &u->read; struct xdr_stream *xdr = resp->xdr; - int starting_len = xdr->buf->len; bool splice_ok = argp->splice_ok; + unsigned int eof_offset; unsigned long maxcount; + __be32 wire_data[2]; struct file *file; - __be32 *p; if (nfserr) return nfserr; + + eof_offset = xdr->buf->len; file = read->rd_nf->nf_file; - p = xdr_reserve_space(xdr, 8); /* eof flag and byte count */ - if (!p) { + /* Reserve space for the eof flag and byte count */ + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2))) { WARN_ON_ONCE(splice_ok); return nfserr_resource; } - if (resp->xdr->buf->page_len && splice_ok) { - WARN_ON_ONCE(1); - return nfserr_serverfault; - } xdr_commit_encode(xdr); maxcount = min_t(unsigned long, read->rd_length, @@ -4427,12 +4520,13 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr, else nfserr = nfsd4_encode_readv(resp, read, file, maxcount); if (nfserr) { - xdr_truncate_encode(xdr, starting_len); + xdr_truncate_encode(xdr, eof_offset); return nfserr; } - p = xdr_encode_bool(p, read->rd_eof); - *p = cpu_to_be32(read->rd_length); + wire_data[0] = read->rd_eof ? xdr_one : xdr_zero; + wire_data[1] = cpu_to_be32(read->rd_length); + write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2); return nfs_ok; } @@ -4441,25 +4535,21 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, union nfsd4_op_u *u) { struct nfsd4_readlink *readlink = &u->readlink; - __be32 *p, *maxcount_p, zero = xdr_zero; + __be32 *p, wire_count, zero = xdr_zero; struct xdr_stream *xdr = resp->xdr; - int length_offset = xdr->buf->len; + unsigned int length_offset; int maxcount, status; - maxcount_p = xdr_reserve_space(xdr, XDR_UNIT); - if (!maxcount_p) + /* linktext4.count */ + length_offset = xdr->buf->len; + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT))) return nfserr_resource; - maxcount = PAGE_SIZE; + /* linktext4.data */ + maxcount = PAGE_SIZE; p = xdr_reserve_space(xdr, maxcount); if (!p) return nfserr_resource; - /* - * XXX: By default, vfs_readlink() will truncate symlinks if they - * would overflow the buffer. Is this kosher in NFSv4? If not, one - * easy fix is: if vfs_readlink() precisely fills the buffer, assume - * that truncation occurred, and return NFS4ERR_RESOURCE. 
- */ nfserr = nfsd_readlink(readlink->rl_rqstp, readlink->rl_fhp, (char *)p, &maxcount); if (nfserr == nfserr_isdir) @@ -4472,7 +4562,9 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, nfserr = nfserrno(status); goto out_err; } - *maxcount_p = cpu_to_be32(maxcount); + + wire_count = cpu_to_be32(maxcount); + write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, XDR_UNIT); xdr_truncate_encode(xdr, length_offset + 4 + xdr_align_size(maxcount)); write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, &zero, xdr_pad_size(maxcount)); @@ -4607,14 +4699,42 @@ nfsd4_encode_rpcsec_gss_info(struct xdr_stream *xdr, } static __be32 -nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp) +nfsd4_encode_secinfo4(struct xdr_stream *xdr, rpc_authflavor_t pf, + u32 *supported) +{ + struct rpcsec_gss_info info; + __be32 status; + + if (rpcauth_get_gssinfo(pf, &info) == 0) { + (*supported)++; + + /* flavor */ + status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS); + if (status != nfs_ok) + return status; + /* flavor_info */ + status = nfsd4_encode_rpcsec_gss_info(xdr, &info); + if (status != nfs_ok) + return status; + } else if (pf < RPC_AUTH_MAXFLAVOR) { + (*supported)++; + + /* flavor */ + status = nfsd4_encode_uint32_t(xdr, pf); + if (status != nfs_ok) + return status; + } + return nfs_ok; +} + +static __be32 +nfsd4_encode_SECINFO4resok(struct xdr_stream *xdr, struct svc_export *exp) { u32 i, nflavs, supported; struct exp_flavor_info *flavs; struct exp_flavor_info def_flavs[2]; - static bool report = true; - __be32 *flavorsp; - __be32 status; + unsigned int count_offset; + __be32 status, wire_count; if (exp->ex_nflavors) { flavs = exp->ex_flavors; @@ -4636,43 +4756,20 @@ nfsd4_do_encode_secinfo(struct xdr_stream *xdr, struct svc_export *exp) } } - supported = 0; - flavorsp = xdr_reserve_space(xdr, XDR_UNIT); - if (!flavorsp) + count_offset = xdr->buf->len; + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT))) return nfserr_resource; - for (i = 0; i < nflavs; i++) { - rpc_authflavor_t pf = flavs[i].pseudoflavor; - struct rpcsec_gss_info info; - - if (rpcauth_get_gssinfo(pf, &info) == 0) { - supported++; - - /* flavor */ - status = nfsd4_encode_uint32_t(xdr, RPC_AUTH_GSS); - if (status != nfs_ok) - return status; - /* flavor_info */ - status = nfsd4_encode_rpcsec_gss_info(xdr, &info); - if (status != nfs_ok) - return status; - } else if (pf < RPC_AUTH_MAXFLAVOR) { - supported++; - - /* flavor */ - status = nfsd4_encode_uint32_t(xdr, pf); - if (status != nfs_ok) - return status; - } else { - if (report) - pr_warn("NFS: SECINFO: security flavor %u " - "is not supported\n", pf); - } + for (i = 0, supported = 0; i < nflavs; i++) { + status = nfsd4_encode_secinfo4(xdr, flavs[i].pseudoflavor, + &supported); + if (status != nfs_ok) + return status; } - if (nflavs != supported) - report = false; - *flavorsp = cpu_to_be32(supported); + wire_count = cpu_to_be32(supported); + write_bytes_to_xdr_buf(xdr->buf, count_offset, &wire_count, + XDR_UNIT); return 0; } @@ -4683,7 +4780,7 @@ nfsd4_encode_secinfo(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_secinfo *secinfo = &u->secinfo; struct xdr_stream *xdr = resp->xdr; - return nfsd4_do_encode_secinfo(xdr, secinfo->si_exp); + return nfsd4_encode_SECINFO4resok(xdr, secinfo->si_exp); } static __be32 @@ -4693,7 +4790,7 @@ nfsd4_encode_secinfo_no_name(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_secinfo_no_name *secinfo = &u->secinfo_no_name; struct xdr_stream *xdr = resp->xdr; - return 
nfsd4_do_encode_secinfo(xdr, secinfo->sin_exp); + return nfsd4_encode_SECINFO4resok(xdr, secinfo->sin_exp); } static __be32 @@ -4968,7 +5065,7 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr, if (nfserr != nfs_ok) return nfserr; /* sr_target_highest_slotid */ - nfserr = nfsd4_encode_slotid4(xdr, seq->maxslots - 1); + nfserr = nfsd4_encode_slotid4(xdr, seq->target_maxslots - 1); if (nfserr != nfs_ok) return nfserr; /* sr_status_flags */ @@ -5296,17 +5393,20 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp, struct file *file = read->rd_nf->nf_file; struct xdr_stream *xdr = resp->xdr; bool splice_ok = argp->splice_ok; + unsigned int offset_offset; + __be32 nfserr, wire_count; unsigned long maxcount; - __be32 nfserr, *p; + __be64 wire_offset; - /* Content type, offset, byte count */ - p = xdr_reserve_space(xdr, 4 + 8 + 4); - if (!p) + if (xdr_stream_encode_u32(xdr, NFS4_CONTENT_DATA) != XDR_UNIT) return nfserr_io; - if (resp->xdr->buf->page_len && splice_ok) { - WARN_ON_ONCE(splice_ok); - return nfserr_serverfault; - } + + offset_offset = xdr->buf->len; + + /* Reserve space for the byte offset and count */ + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 3))) + return nfserr_io; + xdr_commit_encode(xdr); maxcount = min_t(unsigned long, read->rd_length, (xdr->buf->buflen - xdr->buf->len)); @@ -5318,10 +5418,12 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp, if (nfserr) return nfserr; - *p++ = cpu_to_be32(NFS4_CONTENT_DATA); - p = xdr_encode_hyper(p, read->rd_offset); - *p = cpu_to_be32(read->rd_length); - + wire_offset = cpu_to_be64(read->rd_offset); + write_bytes_to_xdr_buf(xdr->buf, offset_offset, &wire_offset, + XDR_UNIT * 2); + wire_count = cpu_to_be32(read->rd_length); + write_bytes_to_xdr_buf(xdr->buf, offset_offset + XDR_UNIT * 2, + &wire_count, XDR_UNIT); return nfs_ok; } @@ -5332,16 +5434,17 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_read *read = &u->read; struct file *file = read->rd_nf->nf_file; struct xdr_stream *xdr = resp->xdr; - int starting_len = xdr->buf->len; + unsigned int eof_offset; + __be32 wire_data[2]; u32 segments = 0; - __be32 *p; if (nfserr) return nfserr; - /* eof flag, segment count */ - p = xdr_reserve_space(xdr, 4 + 4); - if (!p) + eof_offset = xdr->buf->len; + + /* Reserve space for the eof flag and segment count */ + if (unlikely(!xdr_reserve_space(xdr, XDR_UNIT * 2))) return nfserr_io; xdr_commit_encode(xdr); @@ -5351,15 +5454,16 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr, nfserr = nfsd4_encode_read_plus_data(resp, read); if (nfserr) { - xdr_truncate_encode(xdr, starting_len); + xdr_truncate_encode(xdr, eof_offset); return nfserr; } segments++; out: - p = xdr_encode_bool(p, read->rd_eof); - *p = cpu_to_be32(segments); + wire_data[0] = read->rd_eof ? 
xdr_one : xdr_zero; + wire_data[1] = cpu_to_be32(segments); + write_bytes_to_xdr_buf(xdr->buf, eof_offset, &wire_data, XDR_UNIT * 2); return nfserr; } @@ -5760,15 +5864,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) struct nfs4_stateowner *so = resp->cstate.replay_owner; struct svc_rqst *rqstp = resp->rqstp; const struct nfsd4_operation *opdesc = op->opdesc; - int post_err_offset; + unsigned int op_status_offset; nfsd4_enc encoder; - __be32 *p; - p = xdr_reserve_space(xdr, 8); - if (!p) + if (xdr_stream_encode_u32(xdr, op->opnum) != XDR_UNIT) + goto release; + op_status_offset = xdr->buf->len; + if (!xdr_reserve_space(xdr, XDR_UNIT)) goto release; - *p++ = cpu_to_be32(op->opnum); - post_err_offset = xdr->buf->len; if (op->opnum == OP_ILLEGAL) goto status; @@ -5809,20 +5912,21 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) * bug if we had to do this on a non-idempotent op: */ warn_on_nonidempotent_op(op); - xdr_truncate_encode(xdr, post_err_offset); + xdr_truncate_encode(xdr, op_status_offset + XDR_UNIT); } if (so) { - int len = xdr->buf->len - post_err_offset; + int len = xdr->buf->len - (op_status_offset + XDR_UNIT); so->so_replay.rp_status = op->status; so->so_replay.rp_buflen = len; - read_bytes_from_xdr_buf(xdr->buf, post_err_offset, + read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT, so->so_replay.rp_buf, len); } status: op->status = nfsd4_map_status(op->status, resp->cstate.minorversion); - *p = op->status; + write_bytes_to_xdr_buf(xdr->buf, op_status_offset, + &op->status, XDR_UNIT); release: if (opdesc && opdesc->op_release) opdesc->op_release(&op->u); diff --git a/fs/nfsd/nfs4xdr_gen.c b/fs/nfsd/nfs4xdr_gen.c new file mode 100644 index 000000000000..a17b5d8e60b3 --- /dev/null +++ b/fs/nfsd/nfs4xdr_gen.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0 +// Generated by xdrgen. Manual edits will be lost. 
+// XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x +// XDR specification modification time: Mon Oct 14 09:10:13 2024 + +#include <linux/sunrpc/svc.h> + +#include "nfs4xdr_gen.h" + +static bool __maybe_unused +xdrgen_decode_int64_t(struct xdr_stream *xdr, int64_t *ptr) +{ + return xdrgen_decode_hyper(xdr, ptr); +}; + +static bool __maybe_unused +xdrgen_decode_uint32_t(struct xdr_stream *xdr, uint32_t *ptr) +{ + return xdrgen_decode_unsigned_int(xdr, ptr); +}; + +static bool __maybe_unused +xdrgen_decode_bitmap4(struct xdr_stream *xdr, bitmap4 *ptr) +{ + if (xdr_stream_decode_u32(xdr, &ptr->count) < 0) + return false; + for (u32 i = 0; i < ptr->count; i++) + if (!xdrgen_decode_uint32_t(xdr, &ptr->element[i])) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_decode_nfstime4(struct xdr_stream *xdr, struct nfstime4 *ptr) +{ + if (!xdrgen_decode_int64_t(xdr, &ptr->seconds)) + return false; + if (!xdrgen_decode_uint32_t(xdr, &ptr->nseconds)) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_decode_fattr4_offline(struct xdr_stream *xdr, fattr4_offline *ptr) +{ + return xdrgen_decode_bool(xdr, ptr); +}; + +static bool __maybe_unused +xdrgen_decode_open_arguments4(struct xdr_stream *xdr, struct open_arguments4 *ptr) +{ + if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access)) + return false; + if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_deny)) + return false; + if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_share_access_want)) + return false; + if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_open_claim)) + return false; + if (!xdrgen_decode_bitmap4(xdr, &ptr->oa_create_mode)) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_decode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +static bool __maybe_unused +xdrgen_decode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +static bool __maybe_unused +xdrgen_decode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +static bool __maybe_unused +xdrgen_decode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +static bool __maybe_unused +xdrgen_decode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +bool +xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr) +{ + return xdrgen_decode_open_arguments4(xdr, ptr); +}; + +bool +xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr) +{ + return xdrgen_decode_nfstime4(xdr, ptr); +}; + +bool +xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr) +{ + return xdrgen_decode_nfstime4(xdr, ptr); +}; + +static bool __maybe_unused +xdrgen_decode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 *ptr) +{ + u32 val; + + if (xdr_stream_decode_u32(xdr, &val) < 0) + return false; + *ptr = val; + return true; +} + +static bool __maybe_unused 
+xdrgen_encode_int64_t(struct xdr_stream *xdr, const int64_t value) +{ + return xdrgen_encode_hyper(xdr, value); +}; + +static bool __maybe_unused +xdrgen_encode_uint32_t(struct xdr_stream *xdr, const uint32_t value) +{ + return xdrgen_encode_unsigned_int(xdr, value); +}; + +static bool __maybe_unused +xdrgen_encode_bitmap4(struct xdr_stream *xdr, const bitmap4 value) +{ + if (xdr_stream_encode_u32(xdr, value.count) != XDR_UNIT) + return false; + for (u32 i = 0; i < value.count; i++) + if (!xdrgen_encode_uint32_t(xdr, value.element[i])) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_encode_nfstime4(struct xdr_stream *xdr, const struct nfstime4 *value) +{ + if (!xdrgen_encode_int64_t(xdr, value->seconds)) + return false; + if (!xdrgen_encode_uint32_t(xdr, value->nseconds)) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_encode_fattr4_offline(struct xdr_stream *xdr, const fattr4_offline value) +{ + return xdrgen_encode_bool(xdr, value); +}; + +static bool __maybe_unused +xdrgen_encode_open_arguments4(struct xdr_stream *xdr, const struct open_arguments4 *value) +{ + if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access)) + return false; + if (!xdrgen_encode_bitmap4(xdr, value->oa_share_deny)) + return false; + if (!xdrgen_encode_bitmap4(xdr, value->oa_share_access_want)) + return false; + if (!xdrgen_encode_bitmap4(xdr, value->oa_open_claim)) + return false; + if (!xdrgen_encode_bitmap4(xdr, value->oa_create_mode)) + return false; + return true; +}; + +static bool __maybe_unused +xdrgen_encode_open_args_share_access4(struct xdr_stream *xdr, open_args_share_access4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} + +static bool __maybe_unused +xdrgen_encode_open_args_share_deny4(struct xdr_stream *xdr, open_args_share_deny4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} + +static bool __maybe_unused +xdrgen_encode_open_args_share_access_want4(struct xdr_stream *xdr, open_args_share_access_want4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} + +static bool __maybe_unused +xdrgen_encode_open_args_open_claim4(struct xdr_stream *xdr, open_args_open_claim4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} + +static bool __maybe_unused +xdrgen_encode_open_args_createmode4(struct xdr_stream *xdr, open_args_createmode4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} + +bool +xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value) +{ + return xdrgen_encode_open_arguments4(xdr, value); +}; + +bool +xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value) +{ + return xdrgen_encode_nfstime4(xdr, value); +}; + +bool +xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value) +{ + return xdrgen_encode_nfstime4(xdr, value); +}; + +static bool __maybe_unused +xdrgen_encode_open_delegation_type4(struct xdr_stream *xdr, open_delegation_type4 value) +{ + return xdr_stream_encode_u32(xdr, value) == XDR_UNIT; +} diff --git a/fs/nfsd/nfs4xdr_gen.h b/fs/nfsd/nfs4xdr_gen.h new file mode 100644 index 000000000000..41a0033b7256 --- /dev/null +++ b/fs/nfsd/nfs4xdr_gen.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Generated by xdrgen. Manual edits will be lost. 
*/ +/* XDR specification file: ../../Documentation/sunrpc/xdr/nfs4_1.x */ +/* XDR specification modification time: Mon Oct 14 09:10:13 2024 */ + +#ifndef _LINUX_XDRGEN_NFS4_1_DECL_H +#define _LINUX_XDRGEN_NFS4_1_DECL_H + +#include <linux/types.h> + +#include <linux/sunrpc/xdr.h> +#include <linux/sunrpc/xdrgen/_defs.h> +#include <linux/sunrpc/xdrgen/_builtins.h> +#include <linux/sunrpc/xdrgen/nfs4_1.h> + +bool xdrgen_decode_fattr4_open_arguments(struct xdr_stream *xdr, fattr4_open_arguments *ptr); +bool xdrgen_encode_fattr4_open_arguments(struct xdr_stream *xdr, const fattr4_open_arguments *value); + +bool xdrgen_decode_fattr4_time_deleg_access(struct xdr_stream *xdr, fattr4_time_deleg_access *ptr); +bool xdrgen_encode_fattr4_time_deleg_access(struct xdr_stream *xdr, const fattr4_time_deleg_access *value); + +bool xdrgen_decode_fattr4_time_deleg_modify(struct xdr_stream *xdr, fattr4_time_deleg_modify *ptr); +bool xdrgen_encode_fattr4_time_deleg_modify(struct xdr_stream *xdr, const fattr4_time_deleg_modify *value); + +#endif /* _LINUX_XDRGEN_NFS4_1_DECL_H */ diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 3adbc05ebaac..ce2a71e4904c 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -48,7 +48,6 @@ enum { NFSD_Versions, NFSD_Ports, NFSD_MaxBlkSize, - NFSD_MaxConnections, NFSD_Filecache, NFSD_Leasetime, NFSD_Gracetime, @@ -68,7 +67,6 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size); static ssize_t write_versions(struct file *file, char *buf, size_t size); static ssize_t write_ports(struct file *file, char *buf, size_t size); static ssize_t write_maxblksize(struct file *file, char *buf, size_t size); -static ssize_t write_maxconn(struct file *file, char *buf, size_t size); #ifdef CONFIG_NFSD_V4 static ssize_t write_leasetime(struct file *file, char *buf, size_t size); static ssize_t write_gracetime(struct file *file, char *buf, size_t size); @@ -87,7 +85,6 @@ static ssize_t (*const write_op[])(struct file *, char *, size_t) = { [NFSD_Versions] = write_versions, [NFSD_Ports] = write_ports, [NFSD_MaxBlkSize] = write_maxblksize, - [NFSD_MaxConnections] = write_maxconn, #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = write_leasetime, [NFSD_Gracetime] = write_gracetime, @@ -902,44 +899,6 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size) nfsd_max_blksize); } -/* - * write_maxconn - Set or report the current max number of connections - * - * Input: - * buf: ignored - * size: zero - * OR - * - * Input: - * buf: C string containing an unsigned - * integer value representing the new - * number of max connections - * size: non-zero length of C string in @buf - * Output: - * On success: passed-in buffer filled with '\n'-terminated C string - * containing numeric value of max_connections setting - * for this net namespace; - * return code is the size in bytes of the string - * On error: return code is zero or a negative errno value - */ -static ssize_t write_maxconn(struct file *file, char *buf, size_t size) -{ - char *mesg = buf; - struct nfsd_net *nn = net_generic(netns(file), nfsd_net_id); - unsigned int maxconn = nn->max_connections; - - if (size > 0) { - int rv = get_uint(&mesg, &maxconn); - - if (rv) - return rv; - trace_nfsd_ctl_maxconn(netns(file), maxconn); - nn->max_connections = maxconn; - } - - return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%u\n", maxconn); -} - #ifdef CONFIG_NFSD_V4 static ssize_t __nfsd4_write_time(struct file *file, char *buf, size_t size, time64_t *time, struct nfsd_net *nn) @@ -1372,7 +1331,6 @@ static int 
nfsd_fill_super(struct super_block *sb, struct fs_context *fc) [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, - [NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO}, [NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO}, #ifdef CONFIG_NFSD_V4 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, @@ -2259,6 +2217,7 @@ static __net_init int nfsd_net_init(struct net *net) seqlock_init(&nn->writeverf_lock); nfsd_proc_stat_init(net); #if IS_ENABLED(CONFIG_NFS_LOCALIO) + spin_lock_init(&nn->local_clients_lock); INIT_LIST_HEAD(&nn->local_clients); #endif return 0; @@ -2276,14 +2235,15 @@ out_export_error: * nfsd_net_pre_exit - Disconnect localio clients from net namespace * @net: a network namespace that is about to be destroyed * - * This invalidated ->net pointers held by localio clients + * This invalidates ->net pointers held by localio clients * while they can still safely access nn->counter. */ static __net_exit void nfsd_net_pre_exit(struct net *net) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - nfs_uuid_invalidate_clients(&nn->local_clients); + nfs_localio_invalidate_clients(&nn->local_clients, + &nn->local_clients_lock); } #endif diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 4b56ba1e8e48..e2997f0ffbc5 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -88,9 +88,6 @@ struct nfsd_genl_rqstp { extern struct svc_program nfsd_programs[]; extern const struct svc_version nfsd_version2, nfsd_version3, nfsd_version4; extern struct mutex nfsd_mutex; -extern spinlock_t nfsd_drc_lock; -extern unsigned long nfsd_drc_max_mem; -extern unsigned long nfsd_drc_mem_used; extern atomic_t nfsd_th_cnt; /* number of available threads */ extern const struct seq_operations nfs_exports_op; @@ -458,7 +455,10 @@ enum { (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ FATTR4_WORD2_MODE_UMASK | \ NFSD4_2_SECURITY_ATTRS | \ - FATTR4_WORD2_XATTR_SUPPORT) + FATTR4_WORD2_XATTR_SUPPORT | \ + FATTR4_WORD2_TIME_DELEG_ACCESS | \ + FATTR4_WORD2_TIME_DELEG_MODIFY | \ + FATTR4_WORD2_OPEN_ARGUMENTS) extern const u32 nfsd_suppattrs[3][3]; @@ -528,7 +528,10 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval) #endif #define NFSD_WRITEABLE_ATTRS_WORD2 \ (FATTR4_WORD2_MODE_UMASK \ - | MAYBE_FATTR4_WORD2_SECURITY_LABEL) + | MAYBE_FATTR4_WORD2_SECURITY_LABEL \ + | FATTR4_WORD2_TIME_DELEG_ACCESS \ + | FATTR4_WORD2_TIME_DELEG_MODIFY \ + ) #define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ NFSD_WRITEABLE_ATTRS_WORD0 diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 6a831cb242df..aef474f1b84b 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -222,7 +222,6 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct net *net, cap_raise_nfsd_set(new->cap_effective, new->cap_permitted); put_cred(override_creds(new)); - put_cred(new); } else { error = nfsd_setuser_and_check_port(rqstp, cred, exp); if (error) @@ -381,6 +380,9 @@ __fh_verify(struct svc_rqst *rqstp, error = check_nfsd_access(exp, rqstp, may_bypass_gss); if (error) goto out; + /* During LOCALIO call to fh_verify will be called with a NULL rqstp */ + if (rqstp) + svc_xprt_set_valid(rqstp->rq_xprt); /* Finally, check access permissions. 
*/ error = nfsd_permission(cred, exp, dentry, access); diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index 49e2f32102ab..9b3d6cff0e1e 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -70,16 +70,6 @@ static __be32 nfsd_init_request(struct svc_rqst *, */ DEFINE_MUTEX(nfsd_mutex); -/* - * nfsd_drc_lock protects nfsd_drc_max_pages and nfsd_drc_pages_used. - * nfsd_drc_max_pages limits the total amount of memory available for - * version 4.1 DRC caches. - * nfsd_drc_pages_used tracks the current version 4.1 DRC memory usage. - */ -DEFINE_SPINLOCK(nfsd_drc_lock); -unsigned long nfsd_drc_max_mem; -unsigned long nfsd_drc_mem_used; - #if IS_ENABLED(CONFIG_NFS_LOCALIO) static const struct svc_version *localio_versions[] = { [1] = &localio_version1, @@ -214,32 +204,32 @@ int nfsd_minorversion(struct nfsd_net *nn, u32 minorversion, enum vers_op change return 0; } -bool nfsd_serv_try_get(struct net *net) __must_hold(rcu) +bool nfsd_net_try_get(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - return (nn && percpu_ref_tryget_live(&nn->nfsd_serv_ref)); + return (nn && percpu_ref_tryget_live(&nn->nfsd_net_ref)); } -void nfsd_serv_put(struct net *net) __must_hold(rcu) +void nfsd_net_put(struct net *net) __must_hold(rcu) { struct nfsd_net *nn = net_generic(net, nfsd_net_id); - percpu_ref_put(&nn->nfsd_serv_ref); + percpu_ref_put(&nn->nfsd_net_ref); } -static void nfsd_serv_done(struct percpu_ref *ref) +static void nfsd_net_done(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref); - complete(&nn->nfsd_serv_confirm_done); + complete(&nn->nfsd_net_confirm_done); } -static void nfsd_serv_free(struct percpu_ref *ref) +static void nfsd_net_free(struct percpu_ref *ref) { - struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_serv_ref); + struct nfsd_net *nn = container_of(ref, struct nfsd_net, nfsd_net_ref); - complete(&nn->nfsd_serv_free_done); + complete(&nn->nfsd_net_free_done); } /* @@ -436,6 +426,10 @@ static void nfsd_shutdown_net(struct net *net) if (!nn->nfsd_net_up) return; + + percpu_ref_kill_and_confirm(&nn->nfsd_net_ref, nfsd_net_done); + wait_for_completion(&nn->nfsd_net_confirm_done); + nfsd_export_flush(net); nfs4_state_shutdown_net(net); nfsd_reply_cache_shutdown(nn); @@ -444,7 +438,10 @@ static void nfsd_shutdown_net(struct net *net) lockd_down(net); nn->lockd_up = false; } - percpu_ref_exit(&nn->nfsd_serv_ref); + + wait_for_completion(&nn->nfsd_net_free_done); + percpu_ref_exit(&nn->nfsd_net_ref); + nn->nfsd_net_up = false; nfsd_shutdown_generic(); } @@ -526,11 +523,6 @@ void nfsd_destroy_serv(struct net *net) lockdep_assert_held(&nfsd_mutex); - percpu_ref_kill_and_confirm(&nn->nfsd_serv_ref, nfsd_serv_done); - wait_for_completion(&nn->nfsd_serv_confirm_done); - wait_for_completion(&nn->nfsd_serv_free_done); - /* percpu_ref_exit is called in nfsd_shutdown_net */ - spin_lock(&nfsd_notifier_lock); nn->nfsd_serv = NULL; spin_unlock(&nfsd_notifier_lock); @@ -575,27 +567,6 @@ void nfsd_reset_versions(struct nfsd_net *nn) } } -/* - * Each session guarantees a negotiated per slot memory cache for replies - * which in turn consumes memory beyond the v2/v3/v4.0 server. A dedicated - * NFSv4.1 server might want to use more memory for a DRC than a machine - * with mutiple services. - * - * Impose a hard limit on the number of pages for the DRC which varies - * according to the machines free pages. 
This is of course only a default. - * - * For now this is a #defined shift which could be under admin control - * in the future. - */ -static void set_max_drc(void) -{ - #define NFSD_DRC_SIZE_SHIFT 7 - nfsd_drc_max_mem = (nr_free_buffer_pages() - >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; - nfsd_drc_mem_used = 0; - dprintk("%s nfsd_drc_max_mem %lu \n", __func__, nfsd_drc_max_mem); -} - static int nfsd_get_default_max_blksize(void) { struct sysinfo i; @@ -652,12 +623,12 @@ int nfsd_create_serv(struct net *net) if (nn->nfsd_serv) return 0; - error = percpu_ref_init(&nn->nfsd_serv_ref, nfsd_serv_free, + error = percpu_ref_init(&nn->nfsd_net_ref, nfsd_net_free, 0, GFP_KERNEL); if (error) return error; - init_completion(&nn->nfsd_serv_free_done); - init_completion(&nn->nfsd_serv_confirm_done); + init_completion(&nn->nfsd_net_free_done); + init_completion(&nn->nfsd_net_confirm_done); if (nfsd_max_blksize == 0) nfsd_max_blksize = nfsd_get_default_max_blksize(); @@ -668,7 +639,6 @@ int nfsd_create_serv(struct net *net) if (serv == NULL) return -ENOMEM; - serv->sv_maxconn = nn->max_connections; error = svc_bind(serv, net); if (error < 0) { svc_destroy(&serv); @@ -678,7 +648,6 @@ int nfsd_create_serv(struct net *net) nn->nfsd_serv = serv; spin_unlock(&nfsd_notifier_lock); - set_max_drc(); /* check if the notifier is already set */ if (atomic_inc_return(&nfsd_notifier_refcount) == 1) { register_inetaddr_notifier(&nfsd_inetaddr_notifier); @@ -954,11 +923,7 @@ nfsd(void *vrqstp) * The main request loop */ while (!svc_thread_should_stop(rqstp)) { - /* Update sv_maxconn if it has changed */ - rqstp->rq_server->sv_maxconn = nn->max_connections; - svc_recv(rqstp); - nfsd_file_net_dispose(nn); } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index e16bb3717fb9..74d2d7b42676 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -159,6 +159,8 @@ struct nfs4_cb_fattr { /* from CB_GETATTR reply */ u64 ncf_cb_change; u64 ncf_cb_fsize; + struct timespec64 ncf_cb_mtime; + struct timespec64 ncf_cb_atime; unsigned long ncf_cb_flags; bool ncf_file_modified; @@ -207,6 +209,22 @@ struct nfs4_delegation { struct nfs4_cb_fattr dl_cb_fattr; }; +static inline bool deleg_is_read(u32 dl_type) +{ + return (dl_type == OPEN_DELEGATE_READ || dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG); +} + +static inline bool deleg_is_write(u32 dl_type) +{ + return (dl_type == OPEN_DELEGATE_WRITE || dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG); +} + +static inline bool deleg_attrs_deleg(u32 dl_type) +{ + return dl_type == OPEN_DELEGATE_READ_ATTRS_DELEG || + dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG; +} + #define cb_to_delegation(cb) \ container_of(cb, struct nfs4_delegation, dl_recall) @@ -227,8 +245,11 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s) return container_of(s, struct nfs4_delegation, dl_stid); } -/* Maximum number of slots per session. 160 is useful for long haul TCP */ -#define NFSD_MAX_SLOTS_PER_SESSION 160 +/* Maximum number of slots per session. This is for sanity-check only. + * It could be increased if we had a mechanism to shutdown misbehaving clients. + * A large number can be needed to get good throughput on high-latency servers. 
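The rename from nfsd_serv_ref to nfsd_net_ref comes with a lifecycle change visible above: percpu_ref_kill_and_confirm() and the two completions now run inside nfsd_shutdown_net() rather than nfsd_destroy_serv(). Stripped of the nfsd specifics, this is the standard percpu_ref teardown handshake; here is a sketch with hypothetical demo_* names (the percpu_ref_* and completion calls are the real kernel API, the rest is scaffolding):

#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/gfp.h>

struct demo_net {
	struct percpu_ref ref;
	struct completion confirm_done;	/* no new tryget_live() can succeed */
	struct completion free_done;	/* last reference has been dropped */
};

static void demo_ref_confirm(struct percpu_ref *ref)
{
	struct demo_net *dn = container_of(ref, struct demo_net, ref);

	complete(&dn->confirm_done);
}

static void demo_ref_free(struct percpu_ref *ref)
{
	struct demo_net *dn = container_of(ref, struct demo_net, ref);

	complete(&dn->free_done);
}

static int demo_net_init(struct demo_net *dn)
{
	init_completion(&dn->confirm_done);
	init_completion(&dn->free_done);
	return percpu_ref_init(&dn->ref, demo_ref_free, 0, GFP_KERNEL);
}

/* Readers take a cheap per-CPU reference while the structure is live. */
static bool demo_net_try_get(struct demo_net *dn)
{
	return percpu_ref_tryget_live(&dn->ref);
}

static void demo_net_put(struct demo_net *dn)
{
	percpu_ref_put(&dn->ref);
}

/* Teardown: refuse new getters, then wait out the existing holders. */
static void demo_net_shutdown(struct demo_net *dn)
{
	percpu_ref_kill_and_confirm(&dn->ref, demo_ref_confirm);
	wait_for_completion(&dn->confirm_done);
	/* ...shutdown work that remaining reference holders may still race with... */
	wait_for_completion(&dn->free_done);
	percpu_ref_exit(&dn->ref);
}

Doing the kill/confirm step before the per-net shutdown work, as the hunk above now does, guarantees that no new users of the per-net state can appear while it is being torn down.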
+ */ +#define NFSD_MAX_SLOTS_PER_SESSION 2048 /* Maximum session per slot cache size */ #define NFSD_SLOT_CACHE_SIZE 2048 /* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ @@ -242,10 +263,12 @@ struct nfsd4_slot { struct svc_cred sl_cred; u32 sl_datalen; u16 sl_opcnt; + u16 sl_generation; #define NFSD4_SLOT_INUSE (1 << 0) #define NFSD4_SLOT_CACHETHIS (1 << 1) #define NFSD4_SLOT_INITIALIZED (1 << 2) #define NFSD4_SLOT_CACHED (1 << 3) +#define NFSD4_SLOT_REUSED (1 << 4) u8 sl_flags; char sl_data[]; }; @@ -318,16 +341,19 @@ struct nfsd4_session { u32 se_cb_slot_avail; /* bitmap of available slots */ u32 se_cb_highest_slot; /* highest slot client wants */ u32 se_cb_prog; - bool se_dead; struct list_head se_hash; /* hash by sessionid */ struct list_head se_perclnt; + struct list_head se_all_sessions;/* global list of sessions */ struct nfs4_client *se_client; struct nfs4_sessionid se_sessionid; struct nfsd4_channel_attrs se_fchannel; struct nfsd4_cb_sec se_cb_sec; struct list_head se_conns; u32 se_cb_seq_nr[NFSD_BC_SLOT_TABLE_SIZE]; - struct nfsd4_slot *se_slots[]; /* forward channel slots */ + struct xarray se_slots; /* forward channel slots */ + u16 se_slot_gen; + bool se_dead; + u32 se_target_maxslots; }; /* formatted contents of nfs4_sessionid */ @@ -505,7 +531,7 @@ struct nfs4_replay { unsigned int rp_buflen; char *rp_buf; struct knfsd_fh rp_openfh; - atomic_t rp_locked; + int rp_locked; char rp_ibuf[NFSD4_REPLAY_ISIZE]; }; diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h index 696c89f68a9e..ad2c0c432d08 100644 --- a/fs/nfsd/trace.h +++ b/fs/nfsd/trace.h @@ -626,7 +626,6 @@ DEFINE_STATEID_EVENT(open); DEFINE_STATEID_EVENT(deleg_read); DEFINE_STATEID_EVENT(deleg_write); DEFINE_STATEID_EVENT(deleg_return); -DEFINE_STATEID_EVENT(deleg_recall); DECLARE_EVENT_CLASS(nfsd_stateseqid_class, TP_PROTO(u32 seqid, const stateid_t *stp), diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 382cc1389396..c26ba86dbdfd 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -576,9 +576,7 @@ struct nfsd4_sequence { u32 slotid; /* request/response */ u32 maxslots; /* request/response */ u32 cachethis; /* request */ -#if 0 u32 target_maxslots; /* response */ -#endif /* not yet */ u32 status_flags; /* response */ }; diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h index e8b00309c449..f1a315cd31b7 100644 --- a/fs/nfsd/xdr4cb.h +++ b/fs/nfsd/xdr4cb.h @@ -59,16 +59,20 @@ * 1: CB_GETATTR opcode (32-bit) * N: file_handle * 1: number of entry in attribute array (32-bit) - * 1: entry 0 in attribute array (32-bit) + * 3: entry 0-2 in attribute array (32-bit * 3) */ #define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \ cb_sequence_enc_sz + \ - 1 + enc_nfs4_fh_sz + 1 + 1) + 1 + enc_nfs4_fh_sz + 1 + 3) /* * 4: fattr_bitmap_maxsz * 1: attribute array len * 2: change attr (64-bit) * 2: size (64-bit) + * 2: atime.seconds (64-bit) + * 1: atime.nanoseconds (32-bit) + * 2: mtime.seconds (64-bit) + * 1: mtime.nanoseconds (32-bit) */ #define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \ - cb_sequence_dec_sz + 4 + 1 + 2 + 2 + op_dec_sz) + cb_sequence_dec_sz + 4 + 1 + 2 + 2 + 2 + 1 + 2 + 1 + op_dec_sz) diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index ba3e1f591f36..6b506995818d 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -21,6 +21,8 @@ * nilfs_palloc_groups_per_desc_block - get the number of groups that a group * descriptor block can maintain * @inode: inode of metadata file using this allocator + * + * Return: Number of groups that a group descriptor block can maintain. 
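Two related changes above go together: se_slots[] stops being a fixed flexible-array slot table and becomes an xarray, and NFSD_MAX_SLOTS_PER_SESSION turns into a large sanity-check bound (2048) rather than a sizing decision, since slots can now be allocated and trimmed as the negotiated table size (se_target_maxslots) changes. A minimal sketch of that xarray pattern, with hypothetical demo_* names (xa_load/xa_store/xa_erase/xa_for_each_start are the real API; real code would also serialize concurrent access):

#include <linux/xarray.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_slot {
	u32 seqid;
	/* ... cached reply, flags, etc. ... */
};

static DEFINE_XARRAY(demo_slots);

/* Look up a slot by id, allocating it on first use instead of
 * preallocating the whole table. */
static struct demo_slot *demo_slot_get(u32 slotid)
{
	struct demo_slot *slot = xa_load(&demo_slots, slotid);

	if (!slot) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot)
			return NULL;
		/* xa_store() returns the old entry, or an xa_err() value. */
		if (xa_is_err(xa_store(&demo_slots, slotid, slot, GFP_KERNEL))) {
			kfree(slot);
			return NULL;
		}
	}
	return slot;
}

/* Drop every slot at or above a new, smaller target table size. */
static void demo_slots_shrink(u32 target_maxslots)
{
	struct demo_slot *slot;
	unsigned long i;

	xa_for_each_start(&demo_slots, i, slot, target_maxslots) {
		xa_erase(&demo_slots, i);
		kfree(slot);
	}
}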
*/ static inline unsigned long nilfs_palloc_groups_per_desc_block(const struct inode *inode) @@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode) /** * nilfs_palloc_groups_count - get maximum number of groups * @inode: inode of metadata file using this allocator + * + * Return: Maximum number of groups. */ static inline unsigned long nilfs_palloc_groups_count(const struct inode *inode) @@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode) * nilfs_palloc_init_blockgroup - initialize private variables for allocator * @inode: inode of metadata file using this allocator * @entry_size: size of the persistent object + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) { @@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) * @offset: pointer to store offset number in the group + * + * Return: Number of the group that contains the entry with the index + * specified by @nr. */ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, unsigned long *offset) @@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, * @inode: inode of metadata file using this allocator * @group: group number * - * nilfs_palloc_desc_blkoff() returns block offset of the descriptor - * block which contains a descriptor of the specified group. + * Return: Index number in the metadata file of the descriptor block of + * the group specified by @group. */ static unsigned long nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) @@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) * * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap * block used to allocate/deallocate entries in the specified group. + * + * Return: Index number in the metadata file of the bitmap block of + * the group specified by @group. */ static unsigned long nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) @@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc + * + * Return: Number of free entries written in the group descriptor @desc. */ static unsigned long nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, @@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc * @n: delta to be added + * + * Return: Number of free entries after adjusting the group descriptor + * @desc. */ static u32 nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, @@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, * nilfs_palloc_entry_blkoff - get block offset of an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) + * + * Return: Index number in the metadata file of the block containing + * the entry specified by @nr. 
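The kernel-doc added for nilfs_palloc_group() above states the contract precisely: the return value is the group index holding entry @nr, and the position inside that group comes back through @offset. Schematically the mapping is a divide/modulo by the per-group entry count; a stand-alone sketch (not the nilfs source; demo_entries_per_group stands in for nilfs_palloc_entries_per_group()):

#include <stdint.h>

/* Split a 64-bit entry number (e.g. an inode number) into the group it
 * lives in and its offset inside that group's bitmap. */
static unsigned long demo_palloc_group(uint64_t nr,
				       unsigned long demo_entries_per_group,
				       unsigned long *offset)
{
	*offset = (unsigned long)(nr % demo_entries_per_group);
	return (unsigned long)(nr / demo_entries_per_group);
}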
*/ static unsigned long nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr) @@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff, * @blkoff: block offset * @prev: nilfs_bh_assoc struct of the last used buffer * @lock: spin lock protecting @prev + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, struct nilfs_bh_assoc *prev, @@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_desc_block(struct inode *inode, unsigned long group, @@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_bitmap_block(struct inode *inode, unsigned long group, @@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode, * nilfs_palloc_delete_bitmap_block - delete a bitmap block * @inode: inode of metadata file using this allocator * @group: group number + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, unsigned long group) @@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, * @nr: serial number of the entry (e.g. inode number) * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, int create, struct buffer_head **bhp) @@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, * nilfs_palloc_delete_entry_block - delete an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr) { @@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr, * @bsize: size in bits * @lock: spin lock protecting @bitmap * @wrap: whether to wrap around + * + * Return: Offset number within the group of the found free entry, or + * %-ENOSPC if not found. */ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, unsigned long target, @@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, * @inode: inode of metadata file using this allocator * @curr: current group number * @max: maximum number of groups + * + * Return: Number of remaining descriptors (= groups) managed by the descriptor + * block. */ static unsigned long nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, @@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, * nilfs_palloc_count_desc_blocks - count descriptor blocks number * @inode: inode of metadata file using this allocator * @desc_blocks: descriptor blocks number [out] + * + * Return: 0 on success, or a negative error code on failure. 
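Most of the nilfs2 hunks from here on are the same mechanical documentation conversion: the free-form "Return Value:" paragraphs become kernel-doc "Return:" sections, with error codes listed as ReST bullets and worded consistently. For reference, the target layout looks like this (demo_read_block() is a made-up function used only to show the comment shape):

/**
 * demo_read_block - read one metadata block
 * @inode: inode of the metadata file
 * @blkoff: block offset to read
 * @bhp: place to store the resulting buffer head
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO - I/O error (including metadata corruption).
 * * %-ENOENT - Non-existent block.
 * * %-ENOMEM - Insufficient memory available.
 */
static int demo_read_block(struct inode *inode, unsigned long blkoff,
			   struct buffer_head **bhp);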
*/ static int nilfs_palloc_count_desc_blocks(struct inode *inode, unsigned long *desc_blocks) @@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode, * MDT file growing * @inode: inode of metadata file using this allocator * @desc_blocks: known current descriptor blocks count + * + * Return: true if a group can be added in the metadata file, false if not. */ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, unsigned long desc_blocks) @@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, * @inode: inode of metadata file using this allocator * @nused: current number of used entries * @nmaxp: max number of entries [out] + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ERANGE - Number of entries in use is out of range. */ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) { @@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation * @wrap: whether to wrap around + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Entries exhausted (No entries available for allocation). + * * %-EROFS - Read only filesystem */ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req, bool wrap) @@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_prepare_free_entry(struct inode *inode, struct nilfs_palloc_req *req) @@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode, * @inode: inode of metadata file using this allocator * @entry_nrs: array of entry numbers to be deallocated * @nitems: number of entries stored in @entry_nrs + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) { diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 3f115ab7e9a7..046d876ea3e0 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -21,6 +21,8 @@ * * The number of entries per group is defined by the number of bits * that a bitmap block can maintain. + * + * Return: Number of entries per group. */ static inline unsigned long nilfs_palloc_entries_per_group(const struct inode *inode) diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index c9e8d9a7d820..ccc1a7aa52d2 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, * @ptrp: place to store the value associated to @key * * Description: nilfs_bmap_lookup_at_level() finds a record whose key - * matches @key in the block at @level of the bmap. - * - * Return Value: On success, 0 is returned and the record associated with @key - * is stored in the place pointed by @ptrp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. 
- * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * matches @key in the block at @level of the bmap. The record associated + * with @key is stored in the place pointed to by @ptrp. + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) @@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EEXIST - A record associated with @key already exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - A record associated with @key already exists. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { @@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap * starting from @start, and stores it to @keyp if found. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No valid entry was found + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No valid entry was found. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { @@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { @@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). 
+ * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { @@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap) * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { @@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, /** * nilfs_bmap_assign - assign a new block number to a block - * @bmap: bmap - * @bh: pointer to buffer head + * @bmap: bmap + * @bh: place to store a pointer to the buffer head to which a block + * address is assigned (in/out) * @blocknr: block number - * @binfo: block information + * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the - * buffer specified by @bh. - * - * Return Value: On success, 0 is returned and the buffer head of a newly - * create buffer and the block information associated with the buffer are - * stored in the place pointed by @bh and @binfo, respectively. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * buffer specified by @bh. The block information is stored in the memory + * pointed to by @binfo, and the buffer head may be replaced as a block + * address is assigned, in which case a pointer to the new buffer head is + * stored in the memory pointed to by @bh. + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, @@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap, * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) { @@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) * Description: nilfs_test_and_clear() is the atomic operation to test and * clear the dirty state of @bmap. * - * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. + * Return: 1 if @bmap is dirty, or 0 if clear. */ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) { @@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key; * * Description: nilfs_bmap_read() initializes the bmap @bmap. * - * Return Value: On success, 0 is returned. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient amount of memory available. 
+ * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (corrupted bmap). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 54a3fa0cf67e..568367129092 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -201,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh) * Note that the current implementation does not support folio sizes larger * than the page size. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EIO - I/O error (metadata corruption). * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ef5061bb56da..0d8f7fb15c2e 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, * @inode: host inode of btree * @blocknr: block number * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the node is broken. */ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, size_t size, struct inode *inode, @@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, * @node: btree root node to be examined * @inode: host inode of btree * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the root node is broken. */ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, struct inode *inode) @@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, * @minlevel: start level * @nextkey: place to store the next valid key * - * Return Value: If a next key was found, 0 is returned. Otherwise, - * -ENOENT is returned. + * Return: 0 if the next key was found, %-ENOENT if not found. */ static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index c20207d7a989..bcc7d76269ac 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, * @cnop: place to store the next checkpoint number * @bhp: place to store a pointer to buffer_head struct * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block exists in the range. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - no block exists in the range. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile, __u64 start_cno, __u64 end_cno, @@ -239,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, * stores it to the inode file given by @ifile and the nilfs root object * given by @root. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. 
* * %-EIO - I/O error (including metadata corruption). @@ -307,7 +305,8 @@ out_sem: * In either case, the buffer of the block containing the checkpoint entry * and the cpfile inode are made dirty for inclusion in the write log. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). * * %-EROFS - Read only filesystem @@ -376,7 +375,8 @@ out_sem: * cpfile with the data given by the arguments @root, @blkinc, @ctime, and * @minor. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). */ @@ -447,14 +447,11 @@ error: * the period from @start to @end, excluding @end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid checkpoints. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, __u64 start, @@ -718,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, * number to continue searching. * * Return: Count of checkpoint info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EINVAL - Invalid checkpoint mode. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). @@ -743,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, * @cpfile: checkpoint file inode * @cno: checkpoint number to delete * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EBUSY - Checkpoint in use (snapshot specified). * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No valid checkpoint found. @@ -1011,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) * @cno: checkpoint number * * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or - * the following negative error code on failure. + * one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No such checkpoint. * * %-ENOMEM - Insufficient memory available. @@ -1058,14 +1056,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) * Description: nilfs_change_cpmode() changes the mode of the checkpoint * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No such checkpoint. 
+ * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No such checkpoint. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) { @@ -1097,14 +1092,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) * @cpstat: pointer to a structure of checkpoint statistics * * Description: nilfs_cpfile_get_stat() returns information about checkpoints. + * The checkpoint statistics are stored in the location pointed to by @cpstat. * - * Return Value: On success, 0 is returned, and checkpoints information is - * stored in the place pointed by @cpstat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) { @@ -1135,6 +1128,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) * @cpsize: size of a checkpoint entry * @raw_inode: on-disk cpfile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index e220dcb08aa6..c664daba56ae 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat, * @dat: DAT file inode * @vblocknr: virtual block number * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid DAT entry (internal code). * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. @@ -302,14 +303,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) * Description: nilfs_dat_freev() frees the virtual block numbers specified by * @vblocknrs and @nitems. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The virtual block number have not been allocated. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) { @@ -325,12 +323,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) * Description: nilfs_dat_move() changes the block number associated with * @vblocknr to @blocknr. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) { @@ -390,17 +386,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) * @blocknrp: pointer to a block number * * Description: nilfs_dat_translate() maps the virtual block number @vblocknr - * to the corresponding block number. - * - * Return Value: On success, 0 is returned and the block number associated - * with @vblocknr is stored in the place pointed by @blocknrp. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * to the corresponding block number. The block number associated with + * @vblocknr is stored in the place pointed to by @blocknrp. * - * %-ENOENT - A block number associated with @vblocknr does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A block number associated with @vblocknr does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) { @@ -489,6 +482,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz, * @entry_size: size of a dat entry * @raw_inode: on-disk dat inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 0a3aea6c416b..9b7f8e9655a2 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino) return 0; } -void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct folio *folio, struct inode *inode) { size_t from = offset_in_folio(folio, de); @@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + return err; + } de->inode = cpu_to_le64(inode->i_ino); de->file_type = fs_umode_to_ftype(inode->i_mode); nilfs_commit_chunk(folio, mapping, from, to); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); + return 0; } /* @@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio) from = (char *)pde - kaddr; folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + goto out; + } if (pde) pde->rec_len = nilfs_rec_len_to_disk(to - from); dir->inode = 0; diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 2dbb15767df1..561c220799c7 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -46,14 +46,11 @@ * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The block specified with @pbn does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). 
+ * * %-ENOENT - The block specified with @pbn does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, @@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Invalid virtual block address. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index e7339eb3c08a..c4cd4a4dedd0 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile) * @out_ino: pointer to a variable to store inode number * @out_bh: buffer_head contains newly allocated disk inode * - * Return Value: On success, 0 is returned and the newly allocated inode - * number is stored in the place pointed by @ino, and buffer_head pointer - * that contains newly allocated disk inode structure is stored in the - * place pointed by @out_bh - * On error, one of the following negative error codes is returned. + * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata + * file and stores the inode number in the variable pointed to by @out_ino, + * as well as storing the ifile's buffer with the disk inode in the location + * pointed to by @out_bh. * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No inode left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No inode left. */ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct buffer_head **out_bh) @@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, * @ifile: ifile inode * @ino: inode number * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The inode number @ino have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Inode number unallocated. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) { @@ -150,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, * @ifile: ifile inode * @nmaxinodes: current maximum of available inodes count [out] * @nfreeinodes: free inodes count [out] + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_ifile_count_free_inodes(struct inode *ifile, u64 *nmaxinodes, u64 *nfreeinodes) @@ -174,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile, * @cno: number of checkpoint entry to read * @inode_size: size of an inode * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 23f3a75edd50..6613b8fcceb0 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) * * This function does not issue actual read request of the specified data * block. It is done by VFS. + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct buffer_head *bh_result, int create) @@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, * address_space_operations. * @file: file struct of the file to be read * @folio: the folio to be read + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_read_folio(struct file *file, struct folio *folio) { @@ -598,10 +602,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, * or does nothing if the inode already has it. This function allocates * an additional inode to maintain page cache of B-tree nodes one-on-one. * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or %-ENOMEM if memory is insufficient. */ int nilfs_attach_btree_node_cache(struct inode *inode) { @@ -660,11 +661,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode) * in one inode and the one for b-tree node pages is set up in the * other inode, which is attached to the former inode. * - * Return Value: On success, a pointer to the inode for data pages is - * returned. On errors, one of the following negative error code is returned - * in a pointer type. - * - * %-ENOMEM - Insufficient memory available. + * Return: a pointer to the inode for data pages on success, or %-ENOMEM + * if memory is insufficient. 
*/ struct inode *nilfs_iget_for_shadow(struct inode *inode) { @@ -1188,7 +1186,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (size) { if (phys && blkphy << blkbits == phys + size) { /* The current extent goes on */ - size += n << blkbits; + size += (u64)n << blkbits; } else { /* Terminate the current extent */ ret = fiemap_fill_next_extent( @@ -1201,14 +1199,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; - size = n << blkbits; + size = (u64)n << blkbits; } } else { /* Start a new extent */ flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; - size = n << blkbits; + size = (u64)n << blkbits; } blkoff += n; } diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fa77f78df681..a66d62a51f77 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -33,17 +33,14 @@ * @dofunc: concrete function of get/set metadata info * * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of - * calling dofunc() function on the basis of @argv argument. - * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * calling dofunc() function on the basis of @argv argument. If successful, + * the requested metadata information is copied to userspace memory. + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, struct nilfs_argv *argv, int dir, @@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) * given checkpoint between checkpoint and snapshot state. This ioctl * is used in chcp and mkcp utilities. * - * Return Value: On success, 0 is returned and mode of a checkpoint is - * changed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint mode changing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * %-EFAULT - Failure during checkpoint mode changing. + * %-EPERM - Operation not permitted. */ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -244,13 +238,10 @@ out: * checkpoint from NILFS2 file system. This ioctl is used in rmcp * utility. * - * Return Value: On success, 0 is returned and a checkpoint is - * removed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint removing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * %-EFAULT - Failure during checkpoint removing. + * %-EPERM - Operation not permitted. */ static int nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, @@ -296,7 +287,7 @@ out: * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in * lscp utility and by nilfs_cleanerd daemon. 
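The nilfs_fiemap() hunk above is a 32-bit shift-overflow fix: the contiguous-block count is shifted by blkbits in 32-bit arithmetic and only then widened, so extents of 4 GiB or more wrap before the result reaches the 64-bit size variable; casting the count to u64 first makes the shift happen in 64 bits. A tiny stand-alone illustration (the values are made up purely to show the wrap):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int n = 2 * 1024 * 1024;	/* 2^21 contiguous 4 KiB blocks */
	unsigned int blkbits = 12;
	uint64_t bad  = n << blkbits;		/* 32-bit shift: 2^33 wraps to 0 */
	uint64_t good = (uint64_t)n << blkbits;	/* 64-bit shift: 8589934592 (8 GiB) */

	printf("%llu vs %llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}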
* - * Return value: count of nilfs_cpinfo structures in output buffer. + * Return: Count of nilfs_cpinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities - * and by nilfs_cleanerd daemon. + * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to + * the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and checkpoints information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting checkpoints statistics. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting checkpoints statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. * - * Return value: count of nilfs_suinfo structures in output buffer. + * Return: Count of nilfs_suinfo structures in output buffer on success, + * or a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities - * and by nilfs_cleanerd daemon. - * - * Return Value: On success, 0 is returned, and segment usage information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * and by nilfs_cleanerd daemon. The requested segment usage information is + * copied to the userspace memory pointed to by @argp. * - * %-EFAULT - Failure during getting segment usage statistics. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting segment usage statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used * by nilfs_cleanerd daemon. * - * Return value: count of nilfs_vinfo structures in output buffer. + * Return: Count of nilfs_vinfo structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * about descriptors of disk block numbers. 
The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * - * Return value: count of nilfs_bdescs structures in output buffer. + * Return: Count of nilfs_bdescs structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl - * is used by nilfs_cleanerd daemon. - * - * Return Value: On success, 0 is returned, and disk block descriptors are - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. + * is used by nilfs_cleanerd daemon. If successful, disk block descriptors + * are copied to userspace pointer @argp. * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting disk block descriptors. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting disk block descriptors. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, * Description: nilfs_ioctl_move_inode_block() function registers data/node * buffer in the GC pagecache and submit read request. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Requested block doesn't exist. - * - * %-EEXIST - Blocks conflict is detected. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EIO - I/O error. + * * %-ENOENT - Requested block doesn't exist. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_move_inode_block(struct inode *inode, struct nilfs_vdesc *vdesc, @@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, * blocks that garbage collector specified with the array of nilfs_vdesc * structures and stores them into page caches of GC inodes. * - * Return Value: Number of processed nilfs_vdesc structures or - * error code, otherwise. + * Return: Number of processed nilfs_vdesc structures on success, or + * a negative error code on failure. */ static int nilfs_ioctl_move_blocks(struct super_block *sb, struct nilfs_argv *argv, void *buf) @@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb, * in the period from p_start to p_end, excluding p_end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: Number of processed nilfs_period structures or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: Number of processed nilfs_period structures on success, or one of + * the following negative error codes on failure: + * * %-EINVAL - invalid checkpoints. + * * %-EIO - I/O error. 
+ * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, * Description: nilfs_ioctl_free_vblocknrs() function frees * the virtual block numbers specified by @buf and @argv->v_nmembs. * - * Return Value: Number of processed virtual block numbers or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: Number of processed virtual block numbers on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Unallocated virtual block number. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, * Description: nilfs_ioctl_mark_blocks_dirty() function marks * metadata file or data blocks as dirty. * - * Return Value: Number of processed block descriptors or - * error code, otherwise. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) + * Return: Number of processed block descriptors on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Non-existent block (hole block). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by * nilfs_cleanerd daemon. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -976,20 +947,14 @@ out: * and metadata are written out to the device when it successfully * returned. * - * Return Value: On success, 0 is retured. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -1023,7 +988,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, * @filp: file object * @argp: pointer on argument from userspace * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, void __user *argp) @@ -1059,7 +1024,7 @@ out: * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which * performs the actual trim operation. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) { @@ -1101,7 +1066,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) * of segments in bytes and upper limit of segments in bytes. * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) { @@ -1152,17 +1117,15 @@ out: * @dofunc: concrete function of getting metadata info * * Description: nilfs_ioctl_get_info() gets metadata info by means of - * calling dofunc() function. + * calling dofunc() function. The requested metadata information is copied + * to userspace memory @argp. * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp, @@ -1202,18 +1165,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, * encapsulated in nilfs_argv and updates the segment usage info * according to the flags in nilfs_suinfo_update. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EPERM - Not enough permissions - * - * %-EFAULT - Error copying input data - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EFAULT - Error copying input data. + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EPERM - Not enough permissions. */ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -1309,7 +1268,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp) * @filp: file object * @argp: pointer to userspace memory that contains the volume name * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EFAULT - Error copying input data. * * %-EINVAL - Label length exceeds record size in superblock. * * %-EIO - I/O error. 
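The hunks above rework the kernel-doc "Return:" blocks for the nilfs2 ioctl handlers; from user space those negative codes surface as an ioctl(2) failure with errno set accordingly. The sketch below is a hypothetical, minimal caller (not part of this patch) that forces a checkpoint with NILFS_IOCTL_SYNC; it assumes the uapi header <linux/nilfs2_api.h> provides the ioctl definition, as on recent kernels.

/* Hypothetical userspace sketch (not part of this patch). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/nilfs2_api.h>	/* assumed location of NILFS_IOCTL_SYNC */

int main(int argc, char **argv)
{
	__u64 cno = 0;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <nilfs2 mount point>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * NILFS_IOCTL_SYNC constructs a checkpoint; on success the new
	 * checkpoint number is written back through the argument.  On
	 * failure, errno carries one of the codes documented above
	 * (EIO, ENOMEM, ENOSPC, EROFS, ...).
	 */
	if (ioctl(fd, NILFS_IOCTL_SYNC, &cno) < 0)
		fprintf(stderr, "NILFS_IOCTL_SYNC: %s\n", strerror(errno));
	else
		printf("created checkpoint %llu\n", (unsigned long long)cno);

	close(fd);
	return 0;
}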
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 965b5ad1c0df..2f850a18d6e7 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, * @out_bh: output of a pointer to the buffer_head * * nilfs_mdt_get_block() looks up the specified buffer and tries to create - * a new buffer if @create is not zero. On success, the returned buffer is - * assured to be either existing or formatted using a buffer lock on success. - * @out_bh is substituted only when zero is returned. + * a new buffer if @create is not zero. If (and only if) this function + * succeeds, it stores a pointer to the retrieved buffer head in the location + * pointed to by @out_bh. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. + * The retrieved buffer may be either an existing one or a newly allocated one. + * For a newly created buffer, if the callback function argument @init_block + * is non-NULL, the callback will be called with the buffer locked to format + * the block. * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) - * - * %-EROFS - Read only filesystem (for create mode) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The specified block does not exist (hole block). + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only filesystem (for create mode). */ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, void (*init_block)(struct inode *, @@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, * @out_bh, and block offset to @blkoff, respectively. @out_bh and * @blkoff are substituted only when zero is returned. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block was found in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No block was found in the range. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_mdt_find_block(struct inode *inode, unsigned long start, unsigned long end, unsigned long *blkoff, @@ -321,12 +319,11 @@ out: * @inode: inode of the meta data file * @block: block offset * - * Return Value: On success, zero is returned. - * On error, one of the following negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) { @@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and * tries to release the page including the buffer from a page cache. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EBUSY - page has an active buffer. 
- * - * %-ENOENT - page cache has no page addressed by the offset. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Page has an active buffer. + * * %-ENOENT - Page cache has no page addressed by the offset. */ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { @@ -524,6 +519,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file * @inode: inode of the metadata file * @shadow: shadow mapping + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) @@ -545,6 +542,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, /** * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map * @inode: inode of the metadata file + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_mdt_save_to_shadow_map(struct inode *inode) { diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1d836a5540f3..953fbd5f0851 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -370,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct folio *old_folio; struct nilfs_dir_entry *old_de; struct nilfs_transaction_info ti; + bool old_is_dir = S_ISDIR(old_inode->i_mode); int err; if (flags & ~RENAME_NOREPLACE) @@ -385,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, goto out; } - if (S_ISDIR(old_inode->i_mode)) { + if (old_is_dir && old_dir != new_dir) { err = -EIO; dir_de = nilfs_dotdot(old_inode, &dir_folio); if (!dir_de) @@ -397,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct nilfs_dir_entry *new_de; err = -ENOTEMPTY; - if (dir_de && !nilfs_empty_dir(new_inode)) + if (old_is_dir && !nilfs_empty_dir(new_inode)) goto out_dir; new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, @@ -406,11 +407,13 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = PTR_ERR(new_de); goto out_dir; } - nilfs_set_link(new_dir, new_de, new_folio, old_inode); + err = nilfs_set_link(new_dir, new_de, new_folio, old_inode); folio_release_kmap(new_folio, new_de); + if (unlikely(err)) + goto out_dir; nilfs_mark_inode_dirty(new_dir); inode_set_ctime_current(new_inode); - if (dir_de) + if (old_is_dir) drop_nlink(new_inode); drop_nlink(new_inode); nilfs_mark_inode_dirty(new_inode); @@ -418,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = nilfs_add_link(new_dentry, old_inode); if (err) goto out_dir; - if (dir_de) { + if (old_is_dir) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); } @@ -430,28 +433,28 @@ static int nilfs_rename(struct mnt_idmap *idmap, */ inode_set_ctime_current(old_inode); - nilfs_delete_entry(old_de, old_folio); - - if (dir_de) { - nilfs_set_link(old_inode, dir_de, dir_folio, new_dir); - folio_release_kmap(dir_folio, dir_de); - drop_nlink(old_dir); + err = nilfs_delete_entry(old_de, old_folio); + if (likely(!err)) { + if (old_is_dir) { + if (old_dir != new_dir) + err = nilfs_set_link(old_inode, dir_de, + dir_folio, new_dir); + drop_nlink(old_dir); + } + nilfs_mark_inode_dirty(old_dir); } - folio_release_kmap(old_folio, old_de); - - nilfs_mark_inode_dirty(old_dir); nilfs_mark_inode_dirty(old_inode); - err = nilfs_transaction_commit(old_dir->i_sb); - return err; - out_dir: if (dir_de) folio_release_kmap(dir_folio, dir_de); out_old: folio_release_kmap(old_folio, old_de); out: - nilfs_transaction_abort(old_dir->i_sb); + if (likely(!err)) + err = 
nilfs_transaction_commit(old_dir->i_sb); + else + nilfs_transaction_abort(old_dir->i_sb); return err; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index dff241c53fc5..cb6ed54accd7 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *, int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *); int nilfs_empty_dir(struct inode *); struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **); -void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, - struct folio *, struct inode *); +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, + struct folio *folio, struct inode *inode); /* file.c */ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 9de2a494a069..806b056d2260 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -135,8 +135,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not. * @folio: Folio to be checked. * - * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers. - * Otherwise, it returns true. + * Return: false if the folio has dirty buffers, true otherwise. */ bool nilfs_folio_buffers_clean(struct folio *folio) { @@ -392,6 +391,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping) /** * nilfs_clear_folio_dirty - discard dirty folio * @folio: dirty folio that will be discarded + * + * nilfs_clear_folio_dirty() clears working states including dirty state for + * the folio and its buffers. If the folio has buffers, clear only if it is + * confirmed that none of the buffer heads are busy (none have valid + * references and none are locked). */ void nilfs_clear_folio_dirty(struct folio *folio) { @@ -399,10 +403,6 @@ void nilfs_clear_folio_dirty(struct folio *folio) BUG_ON(!folio_test_locked(folio)); - folio_clear_uptodate(folio); - folio_clear_mappedtodisk(folio); - folio_clear_checked(folio); - head = folio_buffers(folio); if (head) { const unsigned long clear_bits = @@ -410,6 +410,25 @@ void nilfs_clear_folio_dirty(struct folio *folio) BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) | BIT(BH_Delay)); + bool busy, invalidated = false; + +recheck_buffers: + busy = false; + bh = head; + do { + if (atomic_read(&bh->b_count) | buffer_locked(bh)) { + busy = true; + break; + } + } while (bh = bh->b_this_page, bh != head); + + if (busy) { + if (invalidated) + return; + invalidate_bh_lrus(); + invalidated = true; + goto recheck_buffers; + } bh = head; do { @@ -419,6 +438,9 @@ void nilfs_clear_folio_dirty(struct folio *folio) } while (bh = bh->b_this_page, bh != head); } + folio_clear_uptodate(folio); + folio_clear_mappedtodisk(folio); + folio_clear_checked(folio); __nilfs_clear_folio_dirty(folio); } @@ -477,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio) * This function searches an extent of buffers marked "delayed" which * starts from a block offset equal to or larger than @start_blk. If * such an extent was found, this will store the start offset in - * @blkoff and return its length in blocks. Otherwise, zero is - * returned. + * @blkoff and return its length in blocks. + * + * Return: Length in blocks of found extent, 0 otherwise. 
*/ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index e43405bf521e..22aecf6e2344 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err) * @check_bytes: number of bytes to be checked * @start: DBN of start block * @nblock: number of blocks to be checked + * + * Return: 0 on success, or %-EIO if an I/O error occurs. */ static int nilfs_compute_checksum(struct the_nilfs *nilfs, struct buffer_head *bhs, u32 *sum, @@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs, * @sr_block: disk block number of the super root block * @pbh: address of a buffer_head pointer to return super root buffer * @check: CRC check flag + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Super root block corrupted. + * * %-EIO - I/O error. */ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, struct buffer_head **pbh, int check) @@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, * @nilfs: nilfs object * @start_blocknr: start block number of the log * @sum: pointer to return segment summary structure + * + * Return: Buffer head pointer, or NULL if an I/O error occurs. */ static struct buffer_head * nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, @@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, * @seg_seq: sequence number of segment * @bh_sum: buffer head of summary block * @sum: segment summary struct + * + * Return: 0 on success, or one of the following internal codes on failure: + * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch. + * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch. + * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range. + * * %NILFS_SEG_FAIL_IO - I/O error. + * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed. */ static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq, struct buffer_head *bh_sum, @@ -238,6 +254,9 @@ out: * @pbh: the current buffer head on summary blocks [in, out] * @offset: the current byte offset on summary blocks [in, out] * @bytes: byte size of the item to be read + * + * Return: Kernel space address of current segment summary entry, or + * NULL if an I/O error occurs. */ static void *nilfs_read_summary_info(struct the_nilfs *nilfs, struct buffer_head **pbh, @@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs, * @start_blocknr: start block number of the log * @sum: log summary information * @head: list head to add nilfs_recovery_block struct + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, struct nilfs_segment_summary *sum, @@ -571,6 +595,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, * @sb: super block instance * @root: NILFS root instance * @ri: pointer to a nilfs_recovery_info + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Log format error. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. 
*/ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct super_block *sb, @@ -754,18 +784,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs) * @sb: super block instance * @ri: pointer to a nilfs_recovery_info struct to store search results. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - Inconsistent filesystem state. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Inconsistent filesystem state. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. */ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, struct super_block *sb, @@ -830,14 +855,11 @@ failed: * segment pointed by the superblock. It sets up struct the_nilfs through * this search. It fills nilfs_recovery_info (ri) required for recovery. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - No valid segment found - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index e08cab03366b..a8bdf3d318ea 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, * @segbuf: buffer storing a log to be written * @nilfs: nilfs object * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: Always 0. */ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, struct the_nilfs *nilfs) @@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, * nilfs_segbuf_wait - wait for completion of requested BIOs * @segbuf: segment buffer * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error + * Return: 0 on success, or %-EIO if I/O error is detected. */ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) { diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 587251830897..3a202e51b360 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb, * When @vacancy_check flag is set, this function will check the amount of * free space, and will wait for the GC to reclaim disk space if low capacity. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-ENOSPC - No space left on device + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (if checking free space). 
*/ int nilfs_transaction_begin(struct super_block *sb, struct nilfs_transaction_info *ti, @@ -252,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb, * nilfs_transaction_commit() sets a timer to start the segment * constructor. If a sync flag is set, it starts construction * directly. + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_transaction_commit(struct super_block *sb) { @@ -407,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, /** * nilfs_segctor_reset_segment_buffer - reset the current segment buffer * @sci: nilfs_sc_info + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { @@ -734,7 +736,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, if (!head) head = create_empty_buffers(folio, i_blocksize(inode), 0); - folio_unlock(folio); bh = head; do { @@ -744,11 +745,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { + folio_unlock(folio); folio_batch_release(&fbatch); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); + + folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); @@ -1118,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, * a super root block containing this sufile change is complete, and it can * be canceled with nilfs_sufile_cancel_freev() until then. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid segment number. * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. @@ -1315,6 +1320,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) * nilfs_segctor_begin_construction - setup segment buffer to make a new log * @sci: nilfs_sc_info * @nilfs: nilfs object + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) @@ -2312,18 +2319,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force) * nilfs_construct_segment - construct a logical segment * @sb: super block * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ int nilfs_construct_segment(struct super_block *sb) { @@ -2347,18 +2349,13 @@ int nilfs_construct_segment(struct super_block *sb) * @start: start byte offset * @end: end byte offset (inclusive) * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). 
- * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, loff_t start, loff_t end) @@ -2464,6 +2461,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) * nilfs_segctor_construct - form logs and write them to disk * @sci: segment constructor object * @mode: mode of log forming + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { @@ -2836,7 +2835,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) * This allocates a log writer object, initializes it, and starts the * log writer. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINTR - Log writer thread creation failed due to interruption. * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index d3ecc813d633..330f269abedf 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, /** * nilfs_sufile_get_ncleansegs - return the number of clean segments * @sufile: inode of segment usage file + * + * Return: Number of clean segments. */ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) { @@ -155,17 +157,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) * of successfully modified segments from the head is stored in the * place @ndone points to. * - * Return Value: On success, zero is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Given segment usage is in hole block (may be returned if - * @create is zero) - * - * %-EINVAL - Invalid segment usage number + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Given segment usage is in hole block (may be returned if + * @create is zero) + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, int create, size_t *ndone, @@ -272,10 +270,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, * @start: minimum segment number of allocatable region (inclusive) * @end: maximum segment number of allocatable region (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-ERANGE - invalid segment region + * Return: 0 on success, or %-ERANGE if segment range is invalid. */ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) { @@ -300,17 +295,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) * @sufile: inode of segment usage file * @segnump: pointer to segment number * - * Description: nilfs_sufile_alloc() allocates a clean segment. 
- * - * Return Value: On success, 0 is returned and the segment number of the - * allocated segment is stored in the place pointed by @segnump. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. + * Description: nilfs_sufile_alloc() allocates a clean segment, and stores + * its segment number in the place pointed to by @segnump. * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No clean segment left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No clean segment left. */ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) { @@ -510,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty * @sufile: inode of segment usage file * @segnum: segment number + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { @@ -569,6 +563,8 @@ out_sem: * @segnum: segment number * @nblocks: number of live blocks in the segment * @modtime: modification time (option) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, unsigned long nblocks, time64_t modtime) @@ -610,16 +606,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, * @sufile: inode of segment usage file * @sustat: pointer to a structure of segment usage statistics * - * Description: nilfs_sufile_get_stat() returns information about segment - * usage. + * Description: nilfs_sufile_get_stat() retrieves segment usage statistics + * and stores them in the location pointed to by @sustat. * - * Return Value: On success, 0 is returned, and segment usage information is - * stored in the place pointed by @sustat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) { @@ -683,16 +676,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, * @start: start segment number (inclusive) * @end: end segment number (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid number of segments specified - * - * %-EBUSY - Dirty or active segments are present in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments are present in the range. + * * %-EINVAL - Invalid number of segments specified. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_sufile_truncate_range(struct inode *sufile, __u64 start, __u64 end) @@ -787,16 +776,12 @@ out: * @sufile: inode of segment usage file * @newnsegs: new number of segments * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. 
- * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - Enough free space is not left for shrinking - * - * %-EBUSY - Dirty or active segments exist in the region to be truncated + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments exist in the region to be truncated. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Enough free space is not left for shrinking. */ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) { @@ -865,7 +850,7 @@ out: * @nsi: size of suinfo array * * Return: Count of segment usage info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ @@ -939,14 +924,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, * segment usage accordingly. Only the fields indicated by the sup_flags * are updated. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, unsigned int supsz, size_t nsup) @@ -1073,7 +1055,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, * and start+len is rounded down. For each clean segment blkdev_issue_discard * function is invoked. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) { @@ -1219,6 +1201,8 @@ out_sem: * @susize: size of a segment usage entry * @raw_inode: on-disk sufile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_read(struct super_block *sb, size_t susize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 8e8a1a5a0402..cd6f28ab3521 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range); * nilfs_sufile_scrap - make a segment garbage * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) { @@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) * nilfs_sufile_free - free segment * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. 
*/ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) { @@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) * @segnumv: array of segment numbers * @nsegs: size of @segnumv array * @ndone: place to store the number of freed segments + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, size_t *ndone) @@ -95,8 +101,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, * @nsegs: size of @segnumv array * @ndone: place to store the number of cancelled segments * - * Return Value: On success, 0 is returned. On error, a negative error codes - * is returned. + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, @@ -114,14 +119,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, * Description: nilfs_sufile_set_error() marks the segment specified by * @segnum as erroneous. The error segment will never be used again. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid segment usage number. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum) { diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index eca79cca3803..badc2cbc895e 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag) * This function restores state flags in the on-disk super block. * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the * filesystem was not clean previously. + * + * Return: 0 on success, %-EIO if I/O error or superblock is corrupted. */ int nilfs_cleanup_super(struct super_block *sb) { @@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb) * nilfs_move_2nd_super - relocate secondary super block * @sb: super block instance * @sb2off: new offset of the secondary super block (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) { @@ -420,6 +424,8 @@ out: * nilfs_resize_fs - resize the filesystem * @sb: super block instance * @newsize: new size of the filesystem (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) { @@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * - * This function returns true if the tree was in-use. + * Return: true if the tree was in-use, false otherwise. */ static bool nilfs_tree_is_busy(struct dentry *root_dentry) { @@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) * * This function is called exclusively by nilfs->ns_mount_mutex. * So, the recovery process is protected from other simultaneous mounts. + * + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_fill_super(struct super_block *sb, struct fs_context *fc) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index ac03fd3c330c..cb01ea81724d 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs, * alloc_nilfs - allocate a nilfs object * @sb: super block instance * - * Return Value: On success, pointer to the_nilfs is returned. - * On error, NULL is returned. + * Return: a pointer to the allocated nilfs object on success, or NULL on + * failure. */ struct the_nilfs *alloc_nilfs(struct super_block *sb) { @@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) * containing a super root from a given super block, and initializes * relevant information on the nilfs object preparatory for log * scanning and recovery. + * + * Return: 0 on success, or %-EINVAL if current segment number is out + * of range. */ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) @@ -200,8 +203,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, * exponent information written in @sbp and stores it in @blocksize, * or aborts with an error message if it's too large. * - * Return Value: On success, 0 is returned. If the block size is too - * large, -EINVAL is returned. + * Return: 0 on success, or %-EINVAL if the block size is too large. */ static int nilfs_get_blocksize(struct super_block *sb, struct nilfs_super_block *sbp, int *blocksize) @@ -226,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb, * load_nilfs() searches and load the latest super root, * attaches the last segment, and does recovery if needed. * The caller must call this exclusively for simultaneous mounts. + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only device or RO compat mode (if recovery is required) */ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { @@ -395,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits) * nilfs_nrsvsegs - calculate the number of reserved segments * @nilfs: nilfs object * @nsegs: total number of segments + * + * Return: Number of reserved segments. */ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) { @@ -406,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) /** * nilfs_max_segment_count - calculate the maximum number of segments * @nilfs: nilfs object + * + * Return: Maximum number of segments */ static u64 nilfs_max_segment_count(struct the_nilfs *nilfs) { @@ -538,7 +551,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) * area, or if the parameters themselves are not normal, it is * determined to be invalid. * - * Return Value: true if invalid, false if valid. + * Return: true if invalid, false if valid. */ static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { @@ -684,8 +697,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, * reading the super block, getting disk layout information, initializing * shared fields in the_nilfs). * - * Return Value: On success, 0 is returned. On error, a negative error - * code is returned. + * Return: 0 on success, or a negative error code on failure. 
*/ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 6004dfdfdf0f..c4cdaf5fa7ed 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -20,7 +20,7 @@ static int dir_notify_enable __read_mostly = 1; #ifdef CONFIG_SYSCTL -static struct ctl_table dnotify_sysctls[] = { +static const struct ctl_table dnotify_sysctls[] = { { .procname = "dir-notify-enable", .data = &dir_notify_enable, diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 24c7c5df4998..95646f7c46ca 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -223,7 +223,7 @@ static int fanotify_get_response(struct fsnotify_group *group, struct fanotify_perm_event *event, struct fsnotify_iter_info *iter_info) { - int ret; + int ret, errno; pr_debug("%s: group=%p event=%p\n", __func__, group, event); @@ -262,14 +262,23 @@ static int fanotify_get_response(struct fsnotify_group *group, ret = 0; break; case FAN_DENY: + /* Check custom errno from pre-content events */ + errno = fanotify_get_response_errno(event->response); + if (errno) { + ret = -errno; + break; + } + fallthrough; default: ret = -EPERM; } /* Check if the response should be audited */ - if (event->response & FAN_AUDIT) - audit_fanotify(event->response & ~FAN_AUDIT, - &event->audit_rule); + if (event->response & FAN_AUDIT) { + u32 response = event->response & + (FANOTIFY_RESPONSE_ACCESS | FANOTIFY_RESPONSE_FLAGS); + audit_fanotify(response & ~FAN_AUDIT, &event->audit_rule); + } pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__, group, event, ret); @@ -548,9 +557,13 @@ static struct fanotify_event *fanotify_alloc_path_event(const struct path *path, return &pevent->fae; } -static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path, +static struct fanotify_event *fanotify_alloc_perm_event(const void *data, + int data_type, gfp_t gfp) { + const struct path *path = fsnotify_data_path(data, data_type); + const struct file_range *range = + fsnotify_data_file_range(data, data_type); struct fanotify_perm_event *pevent; pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); @@ -564,6 +577,9 @@ static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path, pevent->hdr.len = 0; pevent->state = FAN_EVENT_INIT; pevent->path = *path; + /* NULL ppos means no range info */ + pevent->ppos = range ? &range->pos : NULL; + pevent->count = range ? 
range->count : 0; path_get(path); return &pevent->fae; @@ -801,7 +817,7 @@ static struct fanotify_event *fanotify_alloc_event( old_memcg = set_active_memcg(group->memcg); if (fanotify_is_perm_event(mask)) { - event = fanotify_alloc_perm_event(path, gfp); + event = fanotify_alloc_perm_event(data, data_type, gfp); } else if (fanotify_is_error_event(mask)) { event = fanotify_alloc_error_event(group, fsid, data, data_type, &hash); @@ -909,8 +925,9 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM); BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR); BUILD_BUG_ON(FAN_RENAME != FS_RENAME); + BUILD_BUG_ON(FAN_PRE_ACCESS != FS_PRE_ACCESS); - BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21); + BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 22); mask = fanotify_group_event_mask(group, iter_info, &match_mask, mask, data, data_type, dir); diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h index e5ab33cae6a7..c12cbc270539 100644 --- a/fs/notify/fanotify/fanotify.h +++ b/fs/notify/fanotify/fanotify.h @@ -425,6 +425,8 @@ FANOTIFY_PE(struct fanotify_event *event) struct fanotify_perm_event { struct fanotify_event fae; struct path path; + const loff_t *ppos; /* optional file range info */ + size_t count; u32 response; /* userspace answer to the event */ unsigned short state; /* state of the event */ int fd; /* fd we passed to userspace for this event */ @@ -446,6 +448,14 @@ static inline bool fanotify_is_perm_event(u32 mask) mask & FANOTIFY_PERM_EVENTS; } +static inline bool fanotify_event_has_access_range(struct fanotify_event *event) +{ + if (!(event->mask & FANOTIFY_PRE_CONTENT_EVENTS)) + return false; + + return FANOTIFY_PERM(event)->ppos; +} + static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse) { return container_of(fse, struct fanotify_event, fse); @@ -518,3 +528,8 @@ static inline unsigned int fanotify_mark_user_flags(struct fsnotify_mark *mark) return mflags; } + +static inline u32 fanotify_get_response_errno(int res) +{ + return (res >> FAN_ERRNO_SHIFT) & FAN_ERRNO_MASK; +} diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 2d85c71717d6..ba3e2d09eb44 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -58,7 +58,7 @@ static int fanotify_max_queued_events __read_mostly; static long ft_zero = 0; static long ft_int_max = INT_MAX; -static struct ctl_table fanotify_table[] = { +static const struct ctl_table fanotify_table[] = { { .procname = "max_user_groups", .data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS], @@ -100,8 +100,7 @@ static void __init fanotify_sysctls_init(void) * * Internal and external open flags are stored together in field f_flags of * struct file. Only external open flags shall be allowed in event_f_flags. - * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be - * excluded. + * Internal flags like FMODE_EXEC shall be excluded. 
*/ #define FANOTIFY_INIT_ALL_EVENT_F_BITS ( \ O_ACCMODE | O_APPEND | O_NONBLOCK | \ @@ -118,10 +117,12 @@ struct kmem_cache *fanotify_perm_event_cachep __ro_after_init; #define FANOTIFY_EVENT_ALIGN 4 #define FANOTIFY_FID_INFO_HDR_LEN \ (sizeof(struct fanotify_event_info_fid) + sizeof(struct file_handle)) -#define FANOTIFY_PIDFD_INFO_HDR_LEN \ +#define FANOTIFY_PIDFD_INFO_LEN \ sizeof(struct fanotify_event_info_pidfd) #define FANOTIFY_ERROR_INFO_LEN \ (sizeof(struct fanotify_event_info_error)) +#define FANOTIFY_RANGE_INFO_LEN \ + (sizeof(struct fanotify_event_info_range)) static int fanotify_fid_info_len(int fh_len, int name_len) { @@ -159,9 +160,6 @@ static size_t fanotify_event_len(unsigned int info_mode, int fh_len; int dot_len = 0; - if (!info_mode) - return event_len; - if (fanotify_is_error_event(event->mask)) event_len += FANOTIFY_ERROR_INFO_LEN; @@ -176,14 +174,17 @@ static size_t fanotify_event_len(unsigned int info_mode, dot_len = 1; } - if (info_mode & FAN_REPORT_PIDFD) - event_len += FANOTIFY_PIDFD_INFO_HDR_LEN; - if (fanotify_event_has_object_fh(event)) { fh_len = fanotify_event_object_fh_len(event); event_len += fanotify_fid_info_len(fh_len, dot_len); } + if (info_mode & FAN_REPORT_PIDFD) + event_len += FANOTIFY_PIDFD_INFO_LEN; + + if (fanotify_event_has_access_range(event)) + event_len += FANOTIFY_RANGE_INFO_LEN; + return event_len; } @@ -258,12 +259,11 @@ static int create_fd(struct fsnotify_group *group, const struct path *path, return client_fd; /* - * we need a new file handle for the userspace program so it can read even if it was - * originally opened O_WRONLY. + * We provide an fd for the userspace program, so it could access the + * file without generating fanotify events itself. */ - new_file = dentry_open(path, - group->fanotify_data.f_flags | __FMODE_NONOTIFY, - current_cred()); + new_file = dentry_open_nonotify(path, group->fanotify_data.f_flags, + current_cred()); if (IS_ERR(new_file)) { put_unused_fd(client_fd); client_fd = PTR_ERR(new_file); @@ -327,11 +327,12 @@ static int process_access_response(struct fsnotify_group *group, struct fanotify_perm_event *event; int fd = response_struct->fd; u32 response = response_struct->response; + int errno = fanotify_get_response_errno(response); int ret = info_len; struct fanotify_response_info_audit_rule friar; - pr_debug("%s: group=%p fd=%d response=%u buf=%p size=%zu\n", __func__, - group, fd, response, info, info_len); + pr_debug("%s: group=%p fd=%d response=%x errno=%d buf=%p size=%zu\n", + __func__, group, fd, response, errno, info, info_len); /* * make sure the response is valid, if invalid we do nothing and either * userspace can send a valid response or we will clean it up after the @@ -342,7 +343,31 @@ static int process_access_response(struct fsnotify_group *group, switch (response & FANOTIFY_RESPONSE_ACCESS) { case FAN_ALLOW: + if (errno) + return -EINVAL; + break; case FAN_DENY: + /* Custom errno is supported only for pre-content groups */ + if (errno && group->priority != FSNOTIFY_PRIO_PRE_CONTENT) + return -EINVAL; + + /* + * Limit errno to values expected on open(2)/read(2)/write(2) + * of regular files. 
+ */ + switch (errno) { + case 0: + case EIO: + case EPERM: + case EBUSY: + case ETXTBSY: + case EAGAIN: + case ENOSPC: + case EDQUOT: + break; + default: + return -EINVAL; + } break; default: return -EINVAL; @@ -506,7 +531,7 @@ static int copy_pidfd_info_to_user(int pidfd, size_t count) { struct fanotify_event_info_pidfd info = { }; - size_t info_len = FANOTIFY_PIDFD_INFO_HDR_LEN; + size_t info_len = FANOTIFY_PIDFD_INFO_LEN; if (WARN_ON_ONCE(info_len > count)) return -EFAULT; @@ -521,6 +546,30 @@ static int copy_pidfd_info_to_user(int pidfd, return info_len; } +static size_t copy_range_info_to_user(struct fanotify_event *event, + char __user *buf, int count) +{ + struct fanotify_perm_event *pevent = FANOTIFY_PERM(event); + struct fanotify_event_info_range info = { }; + size_t info_len = FANOTIFY_RANGE_INFO_LEN; + + if (WARN_ON_ONCE(info_len > count)) + return -EFAULT; + + if (WARN_ON_ONCE(!pevent->ppos)) + return -EINVAL; + + info.hdr.info_type = FAN_EVENT_INFO_TYPE_RANGE; + info.hdr.len = info_len; + info.offset = *(pevent->ppos); + info.count = pevent->count; + + if (copy_to_user(buf, &info, info_len)) + return -EFAULT; + + return info_len; +} + static int copy_info_records_to_user(struct fanotify_event *event, struct fanotify_info *info, unsigned int info_mode, int pidfd, @@ -642,6 +691,15 @@ static int copy_info_records_to_user(struct fanotify_event *event, total_bytes += ret; } + if (fanotify_event_has_access_range(event)) { + ret = copy_range_info_to_user(event, buf, count); + if (ret < 0) + return ret; + buf += ret; + count -= ret; + total_bytes += ret; + } + return total_bytes; } @@ -756,12 +814,10 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, buf += FAN_EVENT_METADATA_LEN; count -= FAN_EVENT_METADATA_LEN; - if (info_mode) { - ret = copy_info_records_to_user(event, info, info_mode, pidfd, - buf, count); - if (ret < 0) - goto out_close_fd; - } + ret = copy_info_records_to_user(event, info, info_mode, pidfd, + buf, count); + if (ret < 0) + goto out_close_fd; if (f) fd_install(fd, f); @@ -1294,7 +1350,7 @@ static int fanotify_group_init_error_pool(struct fsnotify_group *group) } static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, - unsigned int fan_flags) + __u32 mask, unsigned int fan_flags) { /* * Non evictable mark cannot be downgraded to evictable mark. @@ -1321,6 +1377,11 @@ static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, fsn_mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) return -EEXIST; + /* For now pre-content events are not generated for directories */ + mask |= fsn_mark->mask; + if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR) + return -EEXIST; + return 0; } @@ -1347,7 +1408,7 @@ static int fanotify_add_mark(struct fsnotify_group *group, /* * Check if requested mark flags conflict with an existing mark flags. 
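The hunks above define how userspace consumes the new pre-content machinery: copy_range_info_to_user() appends a FAN_EVENT_INFO_TYPE_RANGE record carrying the offset and count of the accessed range after the event metadata, and process_access_response() now accepts a FAN_DENY response carrying a custom errno, which fanotify_get_response_errno() extracts from the upper bits of the response word. A minimal consumer sketch follows; it is illustrative only and assumes FAN_PRE_ACCESS, FAN_EVENT_INFO_TYPE_RANGE, struct fanotify_event_info_range and FAN_ERRNO_SHIFT are exported by the updated uapi <linux/fanotify.h>, which is outside this fs/ diff.

/*
 * Illustrative sketch, not part of the diff: read FAN_PRE_ACCESS events,
 * print the optional range record and deny the access with EIO.
 */
#include <sys/fanotify.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static void handle_pre_access(int fan_fd)
{
	char buf[8192];
	ssize_t len = read(fan_fd, buf, sizeof(buf));
	struct fanotify_event_metadata *md = (void *)buf;

	for (; len > 0 && FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
		/* Optional info records follow the fixed-size metadata. */
		char *info = (char *)md + md->metadata_len;
		char *end = (char *)md + md->event_len;

		while (info < end) {
			struct fanotify_event_info_header *hdr = (void *)info;

			if (hdr->info_type == FAN_EVENT_INFO_TYPE_RANGE) {
				/* Field names as used by copy_range_info_to_user() above. */
				struct fanotify_event_info_range *r = (void *)hdr;

				printf("pre-access [%llu, +%llu)\n",
				       (unsigned long long)r->offset,
				       (unsigned long long)r->count);
			}
			info += hdr->len;
		}

		if (md->mask & FAN_PRE_ACCESS) {
			struct fanotify_response resp = {
				.fd = md->fd,
				/* FAN_DENY plus errno in the upper bits; shift assumed from uapi. */
				.response = FAN_DENY | (EIO << FAN_ERRNO_SHIFT),
			};

			write(fan_fd, &resp, sizeof(resp));
		}
		if (md->fd >= 0)
			close(md->fd);
	}
}

Per fsnotify_pre_content() further down in this diff, the reported range is page-aligned, so a 100-byte read at offset 5000 on a 4K-page system is reported as offset 4096, count 4096; and per the switch above, only 0, EIO, EPERM, EBUSY, ETXTBSY, EAGAIN, ENOSPC and EDQUOT are accepted as custom errnos.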
*/ - ret = fanotify_may_update_existing_mark(fsn_mark, fan_flags); + ret = fanotify_may_update_existing_mark(fsn_mark, mask, fan_flags); if (ret) goto out; @@ -1409,6 +1470,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; unsigned int internal_flags = 0; + struct file *file; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); @@ -1477,7 +1539,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) (!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID))) return -EINVAL; - f_flags = O_RDWR | __FMODE_NONOTIFY; + f_flags = O_RDWR; if (flags & FAN_CLOEXEC) f_flags |= O_CLOEXEC; if (flags & FAN_NONBLOCK) @@ -1555,10 +1617,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) goto out_destroy_group; } - fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags); + fd = get_unused_fd_flags(f_flags); if (fd < 0) goto out_destroy_group; + file = anon_inode_getfile_fmode("[fanotify]", &fanotify_fops, group, + f_flags, FMODE_NONOTIFY); + if (IS_ERR(file)) { + put_unused_fd(fd); + fd = PTR_ERR(file); + goto out_destroy_group; + } + fd_install(fd, file); return fd; out_destroy_group: @@ -1638,12 +1708,24 @@ static int fanotify_events_supported(struct fsnotify_group *group, unsigned int flags) { unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; + bool is_dir = d_is_dir(path->dentry); /* Strict validation of events in non-dir inode mask with v5.17+ APIs */ bool strict_dir_events = FAN_GROUP_FLAG(group, FAN_REPORT_TARGET_FID) || (mask & FAN_RENAME) || (flags & FAN_MARK_IGNORE); /* + * Filesystems need to opt-into pre-content evnets (a.k.a HSM) + * and they are only supported on regular files and directories. + */ + if (mask & FANOTIFY_PRE_CONTENT_EVENTS) { + if (!(path->mnt->mnt_sb->s_iflags & SB_I_ALLOW_HSM)) + return -EOPNOTSUPP; + if (!is_dir && !d_is_reg(path->dentry)) + return -EINVAL; + } + + /* * Some filesystems such as 'proc' acquire unusual locks when opening * files. For them fanotify permission events have high chances of * deadlocking the system - open done when reporting fanotify event @@ -1675,7 +1757,7 @@ static int fanotify_events_supported(struct fsnotify_group *group, * but because we always allowed it, error only when using new APIs. */ if (strict_dir_events && mark_type == FAN_MARK_INODE && - !d_is_dir(path->dentry) && (mask & FANOTIFY_DIRONLY_EVENT_BITS)) + !is_dir && (mask & FANOTIFY_DIRONLY_EVENT_BITS)) return -ENOTDIR; return 0; @@ -1776,10 +1858,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, return -EPERM; /* - * Permission events require minimum priority FAN_CLASS_CONTENT. + * Permission events are not allowed for FAN_CLASS_NOTIF. + * Pre-content permission events are not allowed for FAN_CLASS_CONTENT. */ if (mask & FANOTIFY_PERM_EVENTS && - group->priority < FSNOTIFY_PRIO_CONTENT) + group->priority == FSNOTIFY_PRIO_NORMAL) + return -EINVAL; + else if (mask & FANOTIFY_PRE_CONTENT_EVENTS && + group->priority == FSNOTIFY_PRIO_CONTENT) return -EINVAL; if (mask & FAN_FS_ERROR && @@ -1814,6 +1900,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME)) return -EINVAL; + /* Pre-content events are not currently generated for directories. 
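Taken together, the fanotify_init() and fanotify_mark() hunks above mean that pre-content events need a FAN_CLASS_PRE_CONTENT group, are refused with EOPNOTSUPP on filesystems that did not set SB_I_ALLOW_HSM, may only be placed on regular files or directories, and cannot be combined with FAN_ONDIR. A minimal setup sketch under those rules, assuming the FAN_PRE_ACCESS constant comes from the updated uapi header (illustrative only, not part of this diff):

/*
 * Illustrative sketch, not part of the diff: create a pre-content group
 * and watch one regular file for FAN_PRE_ACCESS.  Needs CAP_SYS_ADMIN,
 * like other fanotify permission-event groups.
 */
#define _GNU_SOURCE
#include <sys/fanotify.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int watch_file_pre_content(const char *path)
{
	int fan_fd = fanotify_init(FAN_CLASS_PRE_CONTENT | FAN_CLOEXEC,
				   O_RDONLY | O_LARGEFILE);
	if (fan_fd < 0) {
		perror("fanotify_init");
		return -1;
	}

	/*
	 * Per fanotify_events_supported() above, this fails with EOPNOTSUPP
	 * unless the filesystem sets SB_I_ALLOW_HSM, and with EINVAL if
	 * 'path' is neither a regular file nor a directory, or if FAN_ONDIR
	 * is requested together with a pre-content event.
	 */
	if (fanotify_mark(fan_fd, FAN_MARK_ADD, FAN_PRE_ACCESS,
			  AT_FDCWD, path) < 0) {
		perror("fanotify_mark");
		close(fan_fd);
		return -1;
	}
	return fan_fd;
}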
*/ + if (mask & FANOTIFY_PRE_CONTENT_EVENTS && mask & FAN_ONDIR) + return -EINVAL; + if (mark_cmd == FAN_MARK_FLUSH) { if (mark_type == FAN_MARK_MOUNT) fsnotify_clear_vfsmount_marks_by_group(group); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index f976949d2634..fae1b6d397ea 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -193,7 +193,7 @@ static bool fsnotify_event_needs_parent(struct inode *inode, __u32 mnt_mask, return mask & marks_mask; } -/* Are there any inode/mount/sb objects that are interested in this event? */ +/* Are there any inode/mount/sb objects that watch for these events? */ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask, __u32 mask) { @@ -203,6 +203,24 @@ static inline bool fsnotify_object_watched(struct inode *inode, __u32 mnt_mask, return mask & marks_mask & ALL_FSNOTIFY_EVENTS; } +/* Report pre-content event with optional range info */ +int fsnotify_pre_content(const struct path *path, const loff_t *ppos, + size_t count) +{ + struct file_range range; + + /* Report page aligned range only when pos is known */ + if (!ppos) + return fsnotify_path(path, FS_PRE_ACCESS); + + range.path = path; + range.pos = PAGE_ALIGN_DOWN(*ppos); + range.count = PAGE_ALIGN(*ppos + count) - range.pos; + + return fsnotify_parent(path->dentry, FS_PRE_ACCESS, &range, + FSNOTIFY_EVENT_FILE_RANGE); +} + /* * Notify this dentry's parent about a child's events with child name info * if parent is watching or if inode/sb/mount are interested in events with @@ -623,11 +641,78 @@ out: } EXPORT_SYMBOL_GPL(fsnotify); +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +/* + * At open time we check fsnotify_sb_has_priority_watchers() and set the + * FMODE_NONOTIFY_ mode bits accordignly. + * Later, fsnotify permission hooks do not check if there are permission event + * watches, but that there were permission event watches at open time. + */ +void file_set_fsnotify_mode_from_watchers(struct file *file) +{ + struct dentry *dentry = file->f_path.dentry, *parent; + struct super_block *sb = dentry->d_sb; + __u32 mnt_mask, p_mask; + + /* Is it a file opened by fanotify? */ + if (FMODE_FSNOTIFY_NONE(file->f_mode)) + return; + + /* + * Permission events is a super set of pre-content events, so if there + * are no permission event watchers, there are also no pre-content event + * watchers and this is implied from the single FMODE_NONOTIFY_PERM bit. + */ + if (likely(!fsnotify_sb_has_priority_watchers(sb, + FSNOTIFY_PRIO_CONTENT))) { + file_set_fsnotify_mode(file, FMODE_NONOTIFY_PERM); + return; + } + + /* + * If there are permission event watchers but no pre-content event + * watchers, set FMODE_NONOTIFY | FMODE_NONOTIFY_PERM to indicate that. + */ + if ((!d_is_dir(dentry) && !d_is_reg(dentry)) || + likely(!fsnotify_sb_has_priority_watchers(sb, + FSNOTIFY_PRIO_PRE_CONTENT))) { + file_set_fsnotify_mode(file, FMODE_NONOTIFY | FMODE_NONOTIFY_PERM); + return; + } + + /* + * OK, there are some pre-content watchers. Check if anybody is + * watching for pre-content events on *this* file. + */ + mnt_mask = READ_ONCE(real_mount(file->f_path.mnt)->mnt_fsnotify_mask); + if (unlikely(fsnotify_object_watched(d_inode(dentry), mnt_mask, + FSNOTIFY_PRE_CONTENT_EVENTS))) { + /* Enable pre-content events */ + file_set_fsnotify_mode(file, 0); + return; + } + + /* Is parent watching for pre-content events on this file? 
*/ + if (dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED) { + parent = dget_parent(dentry); + p_mask = fsnotify_inode_watches_children(d_inode(parent)); + dput(parent); + if (p_mask & FSNOTIFY_PRE_CONTENT_EVENTS) { + /* Enable pre-content events */ + file_set_fsnotify_mode(file, 0); + return; + } + } + /* Nobody watching for pre-content events from this file */ + file_set_fsnotify_mode(file, FMODE_NONOTIFY | FMODE_NONOTIFY_PERM); +} +#endif + static __init int fsnotify_init(void) { int ret; - BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23); + BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 24); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 993375f0db67..cd7d11b0eb08 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -121,7 +121,7 @@ int inotify_handle_inode_event(struct fsnotify_mark *inode_mark, u32 mask, event->sync_cookie = cookie; event->name_len = len; if (len) - strcpy(event->name, name->name); + strscpy(event->name, name->name, event->name_len + 1); ret = fsnotify_add_event(group, fsn_event, inotify_merge); if (ret) { diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index e0c48956608a..b372fb2c56bd 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __ro_after_init; static long it_zero = 0; static long it_int_max = INT_MAX; -static struct ctl_table inotify_table[] = { +static const struct ctl_table inotify_table[] = { { .procname = "max_user_instances", .data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES], diff --git a/fs/nsfs.c b/fs/nsfs.c index c675fc40ce2d..663f8656158d 100644 --- a/fs/nsfs.c +++ b/fs/nsfs.c @@ -274,10 +274,7 @@ static long ns_ioctl(struct file *filp, unsigned int ioctl, if (usize < MNT_NS_INFO_SIZE_VER0) return -EINVAL; - if (previous) - mnt_ns = lookup_prev_mnt_ns(to_mnt_ns(ns)); - else - mnt_ns = lookup_next_mnt_ns(to_mnt_ns(ns)); + mnt_ns = get_sequential_mnt_ns(to_mnt_ns(ns), previous); if (IS_ERR(mnt_ns)) return PTR_ERR(mnt_ns); diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index 8d789b017fa9..af94e3737470 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -787,7 +787,8 @@ pack_runs: if (err) goto out; - attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id); + attr = mi_find_attr(ni, mi, NULL, type, name, name_len, + &le->id); if (!attr) { err = -EINVAL; goto bad_inode; @@ -1181,7 +1182,7 @@ repack: goto out; } - attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, &le->id); + attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id); if (!attr) { err = -EINVAL; goto out; @@ -1406,7 +1407,7 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr, */ if (!attr->non_res) { if (vbo[1] + bytes_per_off > le32_to_cpu(attr->res.data_size)) { - ntfs_inode_err(&ni->vfs_inode, "is corrupted"); + _ntfs_bad_inode(&ni->vfs_inode); return -EINVAL; } addr = resident_data(attr); @@ -1796,7 +1797,7 @@ repack: goto out; } - attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, + attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, &le->id); if (!attr) { err = -EINVAL; @@ -2041,8 +2042,8 @@ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes) } /* Look for required attribute. 
*/ - attr = mi_find_attr(mi, NULL, ATTR_DATA, NULL, - 0, &le->id); + attr = mi_find_attr(ni, mi, NULL, ATTR_DATA, + NULL, 0, &le->id); if (!attr) { err = -EINVAL; goto out; @@ -2587,7 +2588,7 @@ int attr_force_nonresident(struct ntfs_inode *ni) attr = ni_find_attr(ni, NULL, &le, ATTR_DATA, NULL, 0, NULL, &mi); if (!attr) { - ntfs_bad_inode(&ni->vfs_inode, "no data attribute"); + _ntfs_bad_inode(&ni->vfs_inode); return -ENOENT; } diff --git a/fs/ntfs3/dir.c b/fs/ntfs3/dir.c index fc6a8aa29e3a..b6da80c69ca6 100644 --- a/fs/ntfs3/dir.c +++ b/fs/ntfs3/dir.c @@ -512,7 +512,7 @@ out: ctx->pos = pos; } else if (err < 0) { if (err == -EINVAL) - ntfs_inode_err(dir, "directory corrupted"); + _ntfs_bad_inode(dir); ctx->pos = eod; } diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index 8b39d0ce5f28..5df6a0b5add9 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -75,7 +75,7 @@ struct ATTR_STD_INFO *ni_std(struct ntfs_inode *ni) { const struct ATTRIB *attr; - attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL); + attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL); return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO)) : NULL; } @@ -89,7 +89,7 @@ struct ATTR_STD_INFO5 *ni_std5(struct ntfs_inode *ni) { const struct ATTRIB *attr; - attr = mi_find_attr(&ni->mi, NULL, ATTR_STD, NULL, 0, NULL); + attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_STD, NULL, 0, NULL); return attr ? resident_data_ex(attr, sizeof(struct ATTR_STD_INFO5)) : NULL; @@ -148,8 +148,10 @@ int ni_load_mi_ex(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi) goto out; err = mi_get(ni->mi.sbi, rno, &r); - if (err) + if (err) { + _ntfs_bad_inode(&ni->vfs_inode); return err; + } ni_add_mi(ni, r); @@ -201,7 +203,8 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr, *mi = &ni->mi; /* Look for required attribute in primary record. */ - return mi_find_attr(&ni->mi, attr, type, name, name_len, NULL); + return mi_find_attr(ni, &ni->mi, attr, type, name, name_len, + NULL); } /* First look for list entry of required type. */ @@ -217,7 +220,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr, return NULL; /* Look for required attribute. */ - attr = mi_find_attr(m, NULL, type, name, name_len, &le->id); + attr = mi_find_attr(ni, m, NULL, type, name, name_len, &le->id); if (!attr) goto out; @@ -238,8 +241,7 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr, return attr; out: - ntfs_inode_err(&ni->vfs_inode, "failed to parse mft record"); - ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + _ntfs_bad_inode(&ni->vfs_inode); return NULL; } @@ -259,7 +261,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr, if (mi) *mi = &ni->mi; /* Enum attributes in primary record. */ - return mi_enum_attr(&ni->mi, attr); + return mi_enum_attr(ni, &ni->mi, attr); } /* Get next list entry. */ @@ -275,7 +277,7 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr, *mi = mi2; /* Find attribute in loaded record. 
*/ - return rec_find_attr_le(mi2, le2); + return rec_find_attr_le(ni, mi2, le2); } /* @@ -293,7 +295,8 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, if (!ni->attr_list.size) { if (pmi) *pmi = &ni->mi; - return mi_find_attr(&ni->mi, NULL, type, name, name_len, NULL); + return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len, + NULL); } le = al_find_ex(ni, NULL, type, name, name_len, NULL); @@ -319,7 +322,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, if (pmi) *pmi = mi; - attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id); + attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id); if (!attr) return NULL; @@ -330,6 +333,7 @@ struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, vcn <= le64_to_cpu(attr->nres.evcn)) return attr; + _ntfs_bad_inode(&ni->vfs_inode); return NULL; } @@ -398,7 +402,8 @@ int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, int diff; if (base_only || type == ATTR_LIST || !ni->attr_list.size) { - attr = mi_find_attr(&ni->mi, NULL, type, name, name_len, id); + attr = mi_find_attr(ni, &ni->mi, NULL, type, name, name_len, + id); if (!attr) return -ENOENT; @@ -437,7 +442,7 @@ next_le2: al_remove_le(ni, le); - attr = mi_find_attr(mi, NULL, type, name, name_len, id); + attr = mi_find_attr(ni, mi, NULL, type, name, name_len, id); if (!attr) return -ENOENT; @@ -485,7 +490,7 @@ ni_ins_new_attr(struct ntfs_inode *ni, struct mft_inode *mi, name = le->name; } - attr = mi_insert_attr(mi, type, name, name_len, asize, name_off); + attr = mi_insert_attr(ni, mi, type, name, name_len, asize, name_off); if (!attr) { if (le_added) al_remove_le(ni, le); @@ -673,7 +678,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni) if (err) return err; - attr_list = mi_find_attr(&ni->mi, NULL, ATTR_LIST, NULL, 0, NULL); + attr_list = mi_find_attr(ni, &ni->mi, NULL, ATTR_LIST, NULL, 0, NULL); if (!attr_list) return 0; @@ -695,7 +700,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni) if (!mi) return 0; - attr = mi_find_attr(mi, NULL, le->type, le_name(le), + attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) return 0; @@ -731,7 +736,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni) goto out; } - attr = mi_find_attr(mi, NULL, le->type, le_name(le), + attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) { /* Should never happened, 'cause already checked. */ @@ -740,7 +745,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni) asize = le32_to_cpu(attr->size); /* Insert into primary record. 
*/ - attr_ins = mi_insert_attr(&ni->mi, le->type, le_name(le), + attr_ins = mi_insert_attr(ni, &ni->mi, le->type, le_name(le), le->name_len, asize, le16_to_cpu(attr->name_off)); if (!attr_ins) { @@ -768,7 +773,7 @@ static int ni_try_remove_attr_list(struct ntfs_inode *ni) if (!mi) continue; - attr = mi_find_attr(mi, NULL, le->type, le_name(le), + attr = mi_find_attr(ni, mi, NULL, le->type, le_name(le), le->name_len, &le->id); if (!attr) continue; @@ -831,7 +836,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) free_b = 0; attr = NULL; - for (; (attr = mi_enum_attr(&ni->mi, attr)); le = Add2Ptr(le, sz)) { + for (; (attr = mi_enum_attr(ni, &ni->mi, attr)); le = Add2Ptr(le, sz)) { sz = le_size(attr->name_len); le->type = attr->type; le->size = cpu_to_le16(sz); @@ -886,7 +891,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) u32 asize = le32_to_cpu(b->size); u16 name_off = le16_to_cpu(b->name_off); - attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off), + attr = mi_insert_attr(ni, mi, b->type, Add2Ptr(b, name_off), b->name_len, asize, name_off); if (!attr) goto out; @@ -909,7 +914,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) goto out; } - attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0, + attr = mi_insert_attr(ni, &ni->mi, ATTR_LIST, NULL, 0, lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT); if (!attr) goto out; @@ -993,13 +998,13 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le, mi = rb_entry(node, struct mft_inode, node); if (is_mft_data && - (mi_enum_attr(mi, NULL) || + (mi_enum_attr(ni, mi, NULL) || vbo <= ((u64)mi->rno << sbi->record_bits))) { /* We can't accept this record 'cause MFT's bootstrapping. */ continue; } if (is_mft && - mi_find_attr(mi, NULL, ATTR_DATA, NULL, 0, NULL)) { + mi_find_attr(ni, mi, NULL, ATTR_DATA, NULL, 0, NULL)) { /* * This child record already has a ATTR_DATA. * So it can't accept any other records. @@ -1008,7 +1013,7 @@ static int ni_ins_attr_ext(struct ntfs_inode *ni, struct ATTR_LIST_ENTRY *le, } if ((type != ATTR_NAME || name_len) && - mi_find_attr(mi, NULL, type, name, name_len, NULL)) { + mi_find_attr(ni, mi, NULL, type, name, name_len, NULL)) { /* Only indexed attributes can share same record. */ continue; } @@ -1157,7 +1162,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, /* Estimate the result of moving all possible attributes away. */ attr = NULL; - while ((attr = mi_enum_attr(&ni->mi, attr))) { + while ((attr = mi_enum_attr(ni, &ni->mi, attr))) { if (attr->type == ATTR_STD) continue; if (attr->type == ATTR_LIST) @@ -1175,7 +1180,7 @@ static int ni_insert_attr(struct ntfs_inode *ni, enum ATTR_TYPE type, attr = NULL; for (;;) { - attr = mi_enum_attr(&ni->mi, attr); + attr = mi_enum_attr(ni, &ni->mi, attr); if (!attr) { /* We should never be here 'cause we have already check this case. */ err = -EINVAL; @@ -1259,7 +1264,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni) for (node = rb_first(&ni->mi_tree); node; node = rb_next(node)) { mi = rb_entry(node, struct mft_inode, node); - attr = mi_enum_attr(mi, NULL); + attr = mi_enum_attr(ni, mi, NULL); if (!attr) { mft_min = mi->rno; @@ -1280,7 +1285,7 @@ static int ni_expand_mft_list(struct ntfs_inode *ni) ni_remove_mi(ni, mi_new); } - attr = mi_find_attr(&ni->mi, NULL, ATTR_DATA, NULL, 0, NULL); + attr = mi_find_attr(ni, &ni->mi, NULL, ATTR_DATA, NULL, 0, NULL); if (!attr) { err = -EINVAL; goto out; @@ -1397,7 +1402,7 @@ int ni_expand_list(struct ntfs_inode *ni) continue; /* Find attribute in primary record. 
*/ - attr = rec_find_attr_le(&ni->mi, le); + attr = rec_find_attr_le(ni, &ni->mi, le); if (!attr) { err = -EINVAL; goto out; @@ -1604,8 +1609,8 @@ int ni_delete_all(struct ntfs_inode *ni) roff = le16_to_cpu(attr->nres.run_off); if (roff > asize) { - _ntfs_bad_inode(&ni->vfs_inode); - return -EINVAL; + /* ni_enum_attr_ex checks this case. */ + continue; } /* run==1 means unpack and deallocate. */ @@ -2726,9 +2731,10 @@ int ni_write_frame(struct ntfs_inode *ni, struct page **pages, { int err; struct ntfs_sb_info *sbi = ni->mi.sbi; + struct folio *folio = page_folio(pages[0]); u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits; u32 frame_size = sbi->cluster_size << NTFS_LZNT_CUNIT; - u64 frame_vbo = (u64)pages[0]->index << PAGE_SHIFT; + u64 frame_vbo = folio_pos(folio); CLST frame = frame_vbo >> frame_bits; char *frame_ondisk = NULL; struct page **pages_disk = NULL; @@ -3343,7 +3349,7 @@ int ni_write_inode(struct inode *inode, int sync, const char *hint) if (!mi->dirty) continue; - is_empty = !mi_enum_attr(mi, NULL); + is_empty = !mi_enum_attr(ni, mi, NULL); if (is_empty) clear_rec_inuse(mi->mrec); diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 03471bc9371c..938d351ebac7 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -908,7 +908,11 @@ void ntfs_bad_inode(struct inode *inode, const char *hint) ntfs_inode_err(inode, "%s", hint); make_bad_inode(inode); - ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + /* Avoid recursion if bad inode is $Volume. */ + if (inode->i_ino != MFT_REC_VOL && + !(sbi->flags & NTFS_FLAGS_LOG_REPLAYING)) { + ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + } } /* diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 9089c58a005c..7eb9fae22f8d 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1094,8 +1094,7 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, ok: if (!index_buf_check(ib, bytes, &vbn)) { - ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); - ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + _ntfs_bad_inode(&ni->vfs_inode); err = -EINVAL; goto out; } @@ -1117,8 +1116,7 @@ ok: out: if (err == -E_NTFS_CORRUPT) { - ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); - ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + _ntfs_bad_inode(&ni->vfs_inode); err = -EINVAL; } diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index be04d2845bb7..a1e11228dafd 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -410,6 +410,9 @@ end_enum: if (!std5) goto out; + if (is_bad_inode(inode)) + goto out; + if (!is_match && name) { err = -ENOENT; goto out; diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index cd8e8374bb5a..382820464dee 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -745,23 +745,24 @@ int mi_get(struct ntfs_sb_info *sbi, CLST rno, struct mft_inode **mi); void mi_put(struct mft_inode *mi); int mi_init(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno); int mi_read(struct mft_inode *mi, bool is_mft); -struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr); -// TODO: id? 
-struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr, - enum ATTR_TYPE type, const __le16 *name, - u8 name_len, const __le16 *id); -static inline struct ATTRIB *rec_find_attr_le(struct mft_inode *rec, +struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi, + struct ATTRIB *attr); +struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi, + struct ATTRIB *attr, enum ATTR_TYPE type, + const __le16 *name, u8 name_len, const __le16 *id); +static inline struct ATTRIB *rec_find_attr_le(struct ntfs_inode *ni, + struct mft_inode *rec, struct ATTR_LIST_ENTRY *le) { - return mi_find_attr(rec, NULL, le->type, le_name(le), le->name_len, + return mi_find_attr(ni, rec, NULL, le->type, le_name(le), le->name_len, &le->id); } int mi_write(struct mft_inode *mi, int wait); int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno, __le16 flags, bool is_mft); -struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type, - const __le16 *name, u8 name_len, u32 asize, - u16 name_off); +struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi, + enum ATTR_TYPE type, const __le16 *name, + u8 name_len, u32 asize, u16 name_off); bool mi_remove_attr(struct ntfs_inode *ni, struct mft_inode *mi, struct ATTRIB *attr); diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 61d53d39f3b9..714c7ecedca8 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -31,7 +31,7 @@ static inline int compare_attr(const struct ATTRIB *left, enum ATTR_TYPE type, * * Return: Unused attribute id that is less than mrec->next_attr_id. */ -static __le16 mi_new_attt_id(struct mft_inode *mi) +static __le16 mi_new_attt_id(struct ntfs_inode *ni, struct mft_inode *mi) { u16 free_id, max_id, t16; struct MFT_REC *rec = mi->mrec; @@ -52,7 +52,7 @@ static __le16 mi_new_attt_id(struct mft_inode *mi) attr = NULL; for (;;) { - attr = mi_enum_attr(mi, attr); + attr = mi_enum_attr(ni, mi, attr); if (!attr) { rec->next_attr_id = cpu_to_le16(max_id + 1); mi->dirty = true; @@ -195,7 +195,8 @@ out: * NOTE: mi->mrec - memory of size sbi->record_size * here we sure that mi->mrec->total == sbi->record_size (see mi_read) */ -struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) +struct ATTRIB *mi_enum_attr(struct ntfs_inode *ni, struct mft_inode *mi, + struct ATTRIB *attr) { const struct MFT_REC *rec = mi->mrec; u32 used = le32_to_cpu(rec->used); @@ -209,11 +210,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) off = le16_to_cpu(rec->attr_off); if (used > total) - return NULL; + goto out; if (off >= used || off < MFTRECORD_FIXUP_OFFSET_1 || !IS_ALIGNED(off, 8)) { - return NULL; + goto out; } /* Skip non-resident records. */ @@ -243,7 +244,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) */ if (off + 8 > used) { static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8); - return NULL; + goto out; } if (attr->type == ATTR_END) { @@ -254,112 +255,116 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) /* 0x100 is last known attribute for now. */ t32 = le32_to_cpu(attr->type); if (!t32 || (t32 & 0xf) || (t32 > 0x100)) - return NULL; + goto out; /* attributes in record must be ordered by type */ if (t32 < prev_type) - return NULL; + goto out; asize = le32_to_cpu(attr->size); if (!IS_ALIGNED(asize, 8)) - return NULL; + goto out; /* Check overflow and boundary. */ if (off + asize < off || off + asize > used) - return NULL; + goto out; /* Can we use the field attr->non_res. 
*/ if (off + 9 > used) - return NULL; + goto out; /* Check size of attribute. */ if (!attr->non_res) { /* Check resident fields. */ if (asize < SIZEOF_RESIDENT) - return NULL; + goto out; t16 = le16_to_cpu(attr->res.data_off); if (t16 > asize) - return NULL; + goto out; if (le32_to_cpu(attr->res.data_size) > asize - t16) - return NULL; + goto out; t32 = sizeof(short) * attr->name_len; if (t32 && le16_to_cpu(attr->name_off) + t32 > t16) - return NULL; + goto out; return attr; } /* Check nonresident fields. */ if (attr->non_res != 1) - return NULL; + goto out; /* Can we use memory including attr->nres.valid_size? */ if (asize < SIZEOF_NONRESIDENT) - return NULL; + goto out; t16 = le16_to_cpu(attr->nres.run_off); if (t16 > asize) - return NULL; + goto out; t32 = sizeof(short) * attr->name_len; if (t32 && le16_to_cpu(attr->name_off) + t32 > t16) - return NULL; + goto out; /* Check start/end vcn. */ if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1) - return NULL; + goto out; data_size = le64_to_cpu(attr->nres.data_size); if (le64_to_cpu(attr->nres.valid_size) > data_size) - return NULL; + goto out; alloc_size = le64_to_cpu(attr->nres.alloc_size); if (data_size > alloc_size) - return NULL; + goto out; t32 = mi->sbi->cluster_mask; if (alloc_size & t32) - return NULL; + goto out; if (!attr->nres.svcn && is_attr_ext(attr)) { /* First segment of sparse/compressed attribute */ /* Can we use memory including attr->nres.total_size? */ if (asize < SIZEOF_NONRESIDENT_EX) - return NULL; + goto out; tot_size = le64_to_cpu(attr->nres.total_size); if (tot_size & t32) - return NULL; + goto out; if (tot_size > alloc_size) - return NULL; + goto out; } else { if (attr->nres.c_unit) - return NULL; + goto out; if (alloc_size > mi->sbi->volume.size) - return NULL; + goto out; } return attr; + +out: + _ntfs_bad_inode(&ni->vfs_inode); + return NULL; } /* * mi_find_attr - Find the attribute by type and name and id. */ -struct ATTRIB *mi_find_attr(struct mft_inode *mi, struct ATTRIB *attr, - enum ATTR_TYPE type, const __le16 *name, - u8 name_len, const __le16 *id) +struct ATTRIB *mi_find_attr(struct ntfs_inode *ni, struct mft_inode *mi, + struct ATTRIB *attr, enum ATTR_TYPE type, + const __le16 *name, u8 name_len, const __le16 *id) { u32 type_in = le32_to_cpu(type); u32 atype; next_attr: - attr = mi_enum_attr(mi, attr); + attr = mi_enum_attr(ni, mi, attr); if (!attr) return NULL; @@ -467,9 +472,9 @@ int mi_format_new(struct mft_inode *mi, struct ntfs_sb_info *sbi, CLST rno, * * Return: Not full constructed attribute or NULL if not possible to create. */ -struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type, - const __le16 *name, u8 name_len, u32 asize, - u16 name_off) +struct ATTRIB *mi_insert_attr(struct ntfs_inode *ni, struct mft_inode *mi, + enum ATTR_TYPE type, const __le16 *name, + u8 name_len, u32 asize, u16 name_off) { size_t tail; struct ATTRIB *attr; @@ -488,7 +493,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type, * at which we should insert it. 
*/ attr = NULL; - while ((attr = mi_enum_attr(mi, attr))) { + while ((attr = mi_enum_attr(ni, mi, attr))) { int diff = compare_attr(attr, type, name, name_len, upcase); if (diff < 0) @@ -508,7 +513,7 @@ struct ATTRIB *mi_insert_attr(struct mft_inode *mi, enum ATTR_TYPE type, tail = used - PtrOffset(rec, attr); } - id = mi_new_attt_id(mi); + id = mi_new_attt_id(ni, mi); memmove(Add2Ptr(attr, asize), attr, tail); memset(attr, 0, asize); diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 395e23920632..4414743b638e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec); /* - * Reset the actual path elements so that we can re-use the structure + * Reset the actual path elements so that we can reuse the structure * to build another path. Generally, this involves freeing the buffer * heads. */ @@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle, /* * If there is a gap before the root end and the real end - * of the righmost leaf block, we need to remove the gap + * of the rightmost leaf block, we need to remove the gap * between new_cpos and root_end first so that the tree * is consistent after we add a new branch(it will start * from new_cpos). @@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle, /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be * linked with the rest of the tree. - * conversly, new_eb_bhs[0] is the new bottommost leaf. + * conversely, new_eb_bhs[0] is the new bottommost leaf. * * when we leave the loop, new_last_eb_blk will point to the * newest leaf, and next_blkno will point to the topmost extent @@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, * update split_index here. * * When the split_index is zero, we need to merge it to the - * prevoius extent block. It is more efficient and easier + * previous extent block. It is more efficient and easier * if we do merge_right first and merge_left later. */ ret = ocfs2_merge_rec_right(path, handle, et, split_rec, @@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et, } /* - * This should only be called against the righmost leaf extent list. + * This should only be called against the rightmost leaf extent list. * * ocfs2_figure_appending_type() will figure out whether we'll have to * insert at the tail of the rightmost leaf. 
@@ -6154,6 +6154,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, int status; struct inode *inode = NULL; struct buffer_head *bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + unsigned int tl_count; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, @@ -6171,6 +6174,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, goto bail; } + di = (struct ocfs2_dinode *)bh->b_data; + tl = &di->id2.i_dealloc; + tl_count = le16_to_cpu(tl->tl_count); + if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || + tl_count == 0)) { + status = -EFSCORRUPTED; + iput(inode); + brelse(bh); + mlog_errno(status); + goto bail; + } + *tl_inode = inode; *tl_bh = bh; bail: @@ -6808,27 +6823,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) return 0; } -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys) +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys) { int ret, partial = 0; - loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; - ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); + ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0); if (ret) mlog_errno(ret); if (zero) - zero_user_segment(page, from, to); + folio_zero_segment(folio, from, to); /* * Need to set the buffers we zero'd into uptodate * here if they aren't - ocfs2_map_page_blocks() * might've skipped some */ - ret = walk_page_buffers(handle, page_buffers(page), + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_zero_func); if (ret < 0) @@ -6841,92 +6856,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, } if (!partial) - SetPageUptodate(page); + folio_mark_uptodate(folio); - flush_dcache_page(page); + flush_dcache_folio(folio); } -static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, - loff_t end, struct page **pages, - int numpages, u64 phys, handle_t *handle) +static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, + loff_t end, struct folio **folios, int numfolios, + u64 phys, handle_t *handle) { int i; - struct page *page; - unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); - if (numpages == 0) + if (numfolios == 0) goto out; - to = PAGE_SIZE; - for(i = 0; i < numpages; i++) { - page = pages[i]; - - from = start & (PAGE_SIZE - 1); - if ((end >> PAGE_SHIFT) == page->index) - to = end & (PAGE_SIZE - 1); + for (i = 0; i < numfolios; i++) { + struct folio *folio = folios[i]; + size_t to = folio_size(folio); + size_t from = offset_in_folio(folio, start); - BUG_ON(from > PAGE_SIZE); - BUG_ON(to > PAGE_SIZE); + if (to > end - folio_pos(folio)) + to = end - folio_pos(folio); - ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, - &phys); + ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1, + &phys); - start = (page->index + 1) << PAGE_SHIFT; + start = folio_next_index(folio) << PAGE_SHIFT; } out: - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_folios(struct inode *inode, loff_t start, 
loff_t end, + struct folio **folios, int *num) { - int numpages, ret = 0; + int numfolios, ret = 0; struct address_space *mapping = inode->i_mapping; unsigned long index; loff_t last_page_bytes; BUG_ON(start > end); - numpages = 0; + numfolios = 0; last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_SHIFT; do { - pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); - if (!pages[numpages]) { - ret = -ENOMEM; + folios[numfolios] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folios[numfolios])) { + ret = PTR_ERR(folios[numfolios]); mlog_errno(ret); goto out; } - numpages++; - index++; + index = folio_next_index(folios[numfolios]); + numfolios++; } while (index < (last_page_bytes >> PAGE_SHIFT)); out: if (ret != 0) { - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); - numpages = 0; + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); + numfolios = 0; } - *num = numpages; + *num = numfolios; return ret; } -static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end, + struct folio **folios, int *num) { struct super_block *sb = inode->i_sb; BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); - return ocfs2_grab_pages(inode, start, end, pages, num); + return ocfs2_grab_folios(inode, start, end, folios, num); } /* @@ -6940,8 +6951,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, u64 range_start, u64 range_end) { - int ret = 0, numpages; - struct page **pages = NULL; + int ret = 0, numfolios; + struct folio **folios = NULL; u64 phys; unsigned int ext_flags; struct super_block *sb = inode->i_sb; @@ -6954,17 +6965,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, return 0; /* - * Avoid zeroing pages fully beyond current i_size. It is pointless as - * underlying blocks of those pages should be already zeroed out and + * Avoid zeroing folios fully beyond current i_size. It is pointless as + * underlying blocks of those folios should be already zeroed out and * page writeback will skip them anyway. */ range_end = min_t(u64, range_end, i_size_read(inode)); if (range_start >= range_end) return 0; - pages = kcalloc(ocfs2_pages_per_cluster(sb), - sizeof(struct page *), GFP_NOFS); - if (pages == NULL) { + folios = kcalloc(ocfs2_pages_per_cluster(sb), + sizeof(struct folio *), GFP_NOFS); + if (folios == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; @@ -6985,18 +6996,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) goto out; - ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, - &numpages); + ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios, + &numfolios); if (ret) { mlog_errno(ret); goto out; } - ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, - numpages, phys, handle); + ocfs2_zero_cluster_folios(inode, range_start, range_end, folios, + numfolios, phys, handle); /* - * Initiate writeout of the pages we zero'd here. We don't + * Initiate writeout of the folios we zero'd here. We don't * wait on them - the truncate_inode_pages() call later will * do that for us. 
*/ @@ -7006,7 +7017,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, mlog_errno(ret); out: - kfree(pages); + kfree(folios); return ret; } @@ -7059,7 +7070,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { - int ret, has_data, num_pages = 0; + int ret, has_data, num_folios = 0; int need_free = 0; u32 bit_off, num; handle_t *handle; @@ -7068,7 +7079,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; - struct page *page = NULL; + struct folio *folio = NULL; struct ocfs2_extent_tree et; int did_quota = 0; @@ -7119,12 +7130,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, /* * Save two copies, one for insert, and one that can - * be changed by ocfs2_map_and_dirty_page() below. + * be changed by ocfs2_map_and_dirty_folio() below. */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); - ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page, - &num_pages); + ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio, + &num_folios); if (ret) { mlog_errno(ret); need_free = 1; @@ -7135,15 +7146,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * This should populate the 1st page for us and mark * it up to date. */ - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); if (ret) { mlog_errno(ret); need_free = 1; goto out_unlock; } - ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0, - &phys); + ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0, + &phys); } spin_lock(&oi->ip_lock); @@ -7174,8 +7185,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } out_unlock: - if (page) - ocfs2_unlock_and_free_pages(&page, num_pages); + if (folio) + ocfs2_unlock_and_free_folios(&folio, num_folios); out_commit: if (ret < 0 && did_quota) diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 4af7abaa6e40..1c0c83362904 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) return !rec->e_leaf_clusters; } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num); -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys); +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys); /* * Structures which describe a path through a btree, and functions to * manipulate them. 
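The ocfs2 hunks above and below are part of the page-to-folio conversion: find_or_create_page(), which returns NULL on failure, becomes __filemap_get_folio() with explicit FGP flags, which returns an ERR_PTR() on failure, and per-page arithmetic moves to folio_pos(), folio_size() and folio_next_index(). The recurring lookup pattern, condensed from these hunks for reference (kernel context, so not a standalone program; mapping, index and ret are the surrounding function's locals):

	/* Old pattern, as removed by the diff: */
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	/* New pattern, as added by the diff: */
	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
	if (IS_ERR(folio)) {
		ret = PTR_ERR(folio);
		mlog_errno(ret);
		goto out;
	}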
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index db72b3e924b3..5bbeb6fbb1ac 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -215,10 +215,9 @@ bail: return err; } -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh) { - void *kaddr; loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; @@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, size = i_size_read(inode); - if (size > PAGE_SIZE || + if (size > folio_size(folio) || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", @@ -239,25 +238,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, return -EROFS; } - kaddr = kmap_atomic(page); - if (size) - memcpy(kaddr, di->id2.i_data.id_data, size); - /* Clear the remaining part of the page */ - memset(kaddr + size, 0, PAGE_SIZE - size); - flush_dcache_page(page); - kunmap_atomic(kaddr); - - SetPageUptodate(page); + folio_fill_tail(folio, 0, di->id2.i_data.id_data, size); + folio_mark_uptodate(folio); return 0; } -static int ocfs2_readpage_inline(struct inode *inode, struct page *page) +static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio) { int ret; struct buffer_head *di_bh = NULL; - BUG_ON(!PageLocked(page)); + BUG_ON(!folio_test_locked(folio)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); @@ -266,9 +258,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page) goto out; } - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); out: - unlock_page(page); + folio_unlock(folio); brelse(di_bh); return ret; @@ -283,7 +275,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index); - ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page); + ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; @@ -305,7 +297,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } /* - * i_size might have just been updated as we grabed the meta lock. We + * i_size might have just been updated as we grabbed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. 
Callers @@ -322,7 +314,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) - ret = ocfs2_readpage_inline(inode, &folio->page); + ret = ocfs2_readpage_inline(inode, folio); else ret = block_read_full_folio(folio, ocfs2_get_block); unlock = 0; @@ -534,7 +526,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, * * from == to == 0 is code for "zero the entire cluster region" */ -static void ocfs2_clear_page_regions(struct page *page, +static void ocfs2_clear_folio_regions(struct folio *folio, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { @@ -543,7 +535,7 @@ static void ocfs2_clear_page_regions(struct page *page, ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); - kaddr = kmap_atomic(page); + kaddr = kmap_local_folio(folio, 0); if (from || to) { if (from > cluster_start) @@ -554,13 +546,13 @@ static void ocfs2_clear_page_regions(struct page *page, memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } - kunmap_atomic(kaddr); + kunmap_local(kaddr); } /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an - * allocating one, which means ocfs2_map_page_blocks() might try to + * allocating one, which means ocfs2_map_folio_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ @@ -585,11 +577,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio, * * This will also skip zeroing, which is handled externally. */ -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { - struct folio *folio = page_folio(page); int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; @@ -729,24 +720,24 @@ struct ocfs2_write_ctxt { unsigned int w_large_pages; /* - * Pages involved in this write. + * Folios involved in this write. * - * w_target_page is the page being written to by the user. + * w_target_folio is the folio being written to by the user. * - * w_pages is an array of pages which always contains - * w_target_page, and in the case of an allocating write with + * w_folios is an array of folios which always contains + * w_target_folio, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped - * pages adjacent to w_target_page which need to be written + * pages adjacent to w_target_folio which need to be written * out in so that future reads from that region will get * zero's. */ - unsigned int w_num_pages; - struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; - struct page *w_target_page; + unsigned int w_num_folios; + struct folio *w_folios[OCFS2_MAX_CTXT_PAGES]; + struct folio *w_target_folio; /* * w_target_locked is used for page_mkwrite path indicating no unlocking - * against w_target_page in ocfs2_write_end_nolock. + * against w_target_folio in ocfs2_write_end_nolock. 
*/ unsigned int w_target_locked:1; @@ -771,40 +762,40 @@ struct ocfs2_write_ctxt { unsigned int w_unwritten_count; }; -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios) { int i; - for(i = 0; i < num_pages; i++) { - if (pages[i]) { - unlock_page(pages[i]); - mark_page_accessed(pages[i]); - put_page(pages[i]); - } + for(i = 0; i < num_folios; i++) { + if (!folios[i]) + continue; + folio_unlock(folios[i]); + folio_mark_accessed(folios[i]); + folio_put(folios[i]); } } -static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) +static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc) { int i; /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() - * to write_end(). The caller must hold a ref on w_target_page. + * to write_end(). The caller must hold a ref on w_target_folio. */ if (wc->w_target_locked) { - BUG_ON(!wc->w_target_page); - for (i = 0; i < wc->w_num_pages; i++) { - if (wc->w_target_page == wc->w_pages[i]) { - wc->w_pages[i] = NULL; + BUG_ON(!wc->w_target_folio); + for (i = 0; i < wc->w_num_folios; i++) { + if (wc->w_target_folio == wc->w_folios[i]) { + wc->w_folios[i] = NULL; break; } } - mark_page_accessed(wc->w_target_page); - put_page(wc->w_target_page); + folio_mark_accessed(wc->w_target_folio); + folio_put(wc->w_target_folio); } - ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); + ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios); } static void ocfs2_free_unwritten_list(struct inode *inode, @@ -826,7 +817,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); brelse(wc->w_di_bh); kfree(wc); } @@ -869,29 +860,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. 
*/ -static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) +static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) + BUG_ON(!folio_test_locked(folio)); + head = folio_buffers(folio); + if (!head) return; - bh = head = page_buffers(page); + bh = head; block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { - if (!PageUptodate(page)) { + if (!folio_test_uptodate(folio)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); - zero_user_segment(page, start, end); + folio_zero_segment(folio, start, end); set_buffer_uptodate(bh); } @@ -916,29 +908,26 @@ static void ocfs2_write_failure(struct inode *inode, int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; - struct page *tmppage; - if (wc->w_target_page) - ocfs2_zero_new_buffers(wc->w_target_page, from, to); + if (wc->w_target_folio) + ocfs2_zero_new_buffers(wc->w_target_folio, from, to); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + struct folio *folio = wc->w_folios[i]; - if (tmppage && page_has_buffers(tmppage)) { + if (folio && folio_buffers(folio)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } } -static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, - struct ocfs2_write_ctxt *wc, - struct page *page, u32 cpos, - loff_t user_pos, unsigned user_len, - int new) +static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, + struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos, + loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; @@ -951,20 +940,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ - new = new | ((i_size_read(inode) <= page_offset(page)) && - (page_offset(page) <= user_pos)); + new = new | ((i_size_read(inode) <= folio_pos(folio)) && + (folio_pos(folio) <= user_pos)); - if (page == wc->w_target_page) { + if (folio == wc->w_target_folio) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, - new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); else - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - map_from, map_to, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; @@ -978,7 +966,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, } } else { /* - * If we haven't allocated the new page yet, we + * If we haven't allocated the new folio yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. 
*/ @@ -987,8 +975,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, map_from = cluster_start; map_to = cluster_end; - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; @@ -996,20 +984,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, } /* - * Parts of newly allocated pages need to be zero'd. + * Parts of newly allocated folios need to be zero'd. * * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster - * range inside of a page needs to be written. + * range inside of a folio needs to be written. * - * We can skip this if the page is up to date - it's already + * We can skip this if the folio is uptodate - it's already * been zero'd from being read in as a hole. */ - if (new && !PageUptodate(page)) - ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), + if (new && !folio_test_uptodate(folio)) + ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); - flush_dcache_page(page); + flush_dcache_folio(folio); out: return ret; @@ -1018,11 +1006,9 @@ out: /* * This function will only grab one clusters worth of pages. */ -static int ocfs2_grab_pages_for_write(struct address_space *mapping, - struct ocfs2_write_ctxt *wc, - u32 cpos, loff_t user_pos, - unsigned user_len, int new, - struct page *mmap_page) +static int ocfs2_grab_folios_for_write(struct address_space *mapping, + struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, + unsigned user_len, int new, struct folio *mmap_folio) { int ret = 0, i; unsigned long start, target_index, end_index, index; @@ -1039,7 +1025,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * last page of the write. */ if (new) { - wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); + wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly @@ -1049,15 +1035,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; - if ((start + wc->w_num_pages) > end_index) - wc->w_num_pages = end_index - start; + if ((start + wc->w_num_folios) > end_index) + wc->w_num_folios = end_index - start; } else { - wc->w_num_pages = 1; + wc->w_num_folios = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; - for(i = 0; i < wc->w_num_pages; i++) { + for(i = 0; i < wc->w_num_folios; i++) { index = start + i; if (index >= target_index && index <= end_index && @@ -1067,37 +1053,38 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * and wants us to directly use the page * passed in. 
*/ - lock_page(mmap_page); + folio_lock(mmap_folio); /* Exit and let the caller retry */ - if (mmap_page->mapping != mapping) { - WARN_ON(mmap_page->mapping); - unlock_page(mmap_page); + if (mmap_folio->mapping != mapping) { + WARN_ON(mmap_folio->mapping); + folio_unlock(mmap_folio); ret = -EAGAIN; goto out; } - get_page(mmap_page); - wc->w_pages[i] = mmap_page; + folio_get(mmap_folio); + wc->w_folios[i] = mmap_folio; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ - wc->w_pages[i] = NULL; + wc->w_folios[i] = NULL; continue; } else { - wc->w_pages[i] = find_or_create_page(mapping, index, - GFP_NOFS); - if (!wc->w_pages[i]) { - ret = -ENOMEM; + wc->w_folios[i] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, + GFP_NOFS); + if (IS_ERR(wc->w_folios[i])) { + ret = PTR_ERR(wc->w_folios[i]); mlog_errno(ret); goto out; } } - wait_for_stable_page(wc->w_pages[i]); + folio_wait_stable(wc->w_folios[i]); if (index == target_index) - wc->w_target_page = wc->w_pages[i]; + wc->w_target_folio = wc->w_folios[i]; } out: if (ret) @@ -1181,19 +1168,18 @@ static int ocfs2_write_cluster(struct address_space *mapping, if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); - for(i = 0; i < wc->w_num_pages; i++) { + for (i = 0; i < wc->w_num_folios; i++) { int tmpret; /* This is the direct io target page. */ - if (wc->w_pages[i] == NULL) { + if (wc->w_folios[i] == NULL) { p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits)); continue; } - tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, - wc->w_pages[i], cpos, - user_pos, user_len, - should_zero); + tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc, + wc->w_folios[i], cpos, user_pos, user_len, + should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) @@ -1472,7 +1458,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct page *page; + struct folio *folio; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; @@ -1483,19 +1469,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } - page = find_or_create_page(mapping, 0, GFP_NOFS); - if (!page) { + folio = __filemap_get_folio(mapping, 0, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { ocfs2_commit_trans(osb, handle); - ret = -ENOMEM; + ret = PTR_ERR(folio); mlog_errno(ret); goto out; } /* - * If we don't set w_num_pages then this page won't get unlocked + * If we don't set w_num_folios then this folio won't get unlocked * and freed on cleanup of the write context. 
*/ - wc->w_pages[0] = wc->w_target_page = page; - wc->w_num_pages = 1; + wc->w_target_folio = folio; + wc->w_folios[0] = folio; + wc->w_num_folios = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); @@ -1509,8 +1497,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); - if (!PageUptodate(page)) { - ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); + if (!folio_test_uptodate(folio)) { + ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); @@ -1533,9 +1521,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, - struct inode *inode, loff_t pos, - unsigned len, struct page *mmap_page, - struct ocfs2_write_ctxt *wc) + struct inode *inode, loff_t pos, size_t len, + struct folio *mmap_folio, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; @@ -1550,7 +1537,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - if (mmap_page == NULL && + if (mmap_folio == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; @@ -1574,7 +1561,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Check whether the write can fit. */ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; - if (mmap_page || + if (mmap_folio || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; @@ -1641,9 +1628,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, } int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page) + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; @@ -1666,7 +1653,7 @@ try_again: if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, - mmap_page, wc); + mmap_folio, wc); if (ret == 1) { ret = 0; goto success; @@ -1718,7 +1705,7 @@ try_again: (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), - pos, len, type, mmap_page, + pos, len, type, mmap_folio, clusters_to_alloc, extents_to_split); /* @@ -1789,21 +1776,21 @@ try_again: } /* - * Fill our page array first. That way we've grabbed enough so + * Fill our folio array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ - ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, - cluster_of_pages, mmap_page); + ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len, + cluster_of_pages, mmap_folio); if (ret) { /* - * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock - * the target page. In this case, we exit with no error and no target - * page. This will trigger the caller, page_mkwrite(), to re-try - * the operation. + * ocfs2_grab_folios_for_write() returns -EAGAIN if it + * could not lock the target folio. 
In this case, we exit + * with no error and no target folio. This will trigger + * the caller, page_mkwrite(), to re-try the operation. */ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) { - BUG_ON(wc->w_target_page); + BUG_ON(wc->w_target_folio); ret = 0; goto out_quota; } @@ -1826,7 +1813,7 @@ try_again: success: if (foliop) - *foliop = page_folio(wc->w_target_page); + *foliop = wc->w_target_folio; *fsdata = wc; return 0; out_quota: @@ -1845,7 +1832,7 @@ out: * to VM code. */ if (wc->w_target_locked) - unlock_page(mmap_page); + folio_unlock(mmap_folio); ocfs2_free_write_ctxt(inode, wc); @@ -1924,18 +1911,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { - void *kaddr; - if (unlikely(*copied < len)) { - if (!PageUptodate(wc->w_target_page)) { + if (!folio_test_uptodate(wc->w_target_folio)) { *copied = 0; return; } } - kaddr = kmap_atomic(wc->w_target_page); - memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); - kunmap_atomic(kaddr); + memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio, + pos, *copied); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -1944,17 +1928,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, le16_to_cpu(di->i_dyn_features)); } -int ocfs2_write_end_nolock(struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, void *fsdata) +int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, + unsigned len, unsigned copied, void *fsdata) { int i, ret; - unsigned from, to, start = pos & (PAGE_SIZE - 1); + size_t from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; - struct page *tmppage; BUG_ON(!list_empty(&wc->w_unwritten_list)); @@ -1973,44 +1956,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping, goto out_write_size; } - if (unlikely(copied < len) && wc->w_target_page) { + if (unlikely(copied < len) && wc->w_target_folio) { loff_t new_isize; - if (!PageUptodate(wc->w_target_page)) + if (!folio_test_uptodate(wc->w_target_folio)) copied = 0; new_isize = max_t(loff_t, i_size_read(inode), pos + copied); - if (new_isize > page_offset(wc->w_target_page)) - ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + if (new_isize > folio_pos(wc->w_target_folio)) + ocfs2_zero_new_buffers(wc->w_target_folio, start+copied, start+len); else { /* - * When page is fully beyond new isize (data copy - * failed), do not bother zeroing the page. Invalidate + * When folio is fully beyond new isize (data copy + * failed), do not bother zeroing the folio. Invalidate * it instead so that writeback does not get confused * put page & buffer dirty bits into inconsistent * state. */ - block_invalidate_folio(page_folio(wc->w_target_page), - 0, PAGE_SIZE); + block_invalidate_folio(wc->w_target_folio, 0, + folio_size(wc->w_target_folio)); } } - if (wc->w_target_page) - flush_dcache_page(wc->w_target_page); + if (wc->w_target_folio) + flush_dcache_folio(wc->w_target_folio); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + struct folio *folio = wc->w_folios[i]; - /* This is the direct io target page. 
*/ - if (tmppage == NULL) + /* This is the direct io target folio */ + if (folio == NULL) continue; - if (tmppage == wc->w_target_page) { + if (folio == wc->w_target_folio) { from = wc->w_target_from; to = wc->w_target_to; - BUG_ON(from > PAGE_SIZE || - to > PAGE_SIZE || + BUG_ON(from > folio_size(folio) || + to > folio_size(folio) || to < from); } else { /* @@ -2019,19 +2002,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping, * to flush their entire range. */ from = 0; - to = PAGE_SIZE; + to = folio_size(folio); } - if (page_has_buffers(tmppage)) { + if (folio_buffers(folio)) { if (handle && ocfs2_should_order_data(inode)) { - loff_t start_byte = - ((loff_t)tmppage->index << PAGE_SHIFT) + - from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } @@ -2060,7 +2041,7 @@ out: * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. */ - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); if (handle) ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 1d1b4b7edba0..114efc9111e4 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -8,16 +8,11 @@ #include <linux/fs.h> -handle_t *ocfs2_start_walk_page_trans(struct inode *inode, - struct page *page, - unsigned from, - unsigned to); - -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new); -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages); +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios); int walk_page_buffers( handle_t *handle, struct buffer_head *head, @@ -37,11 +32,11 @@ typedef enum { } ocfs2_write_type_t; int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page); + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio); -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh); int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 4200a0341343..724350925aff 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -3,6 +3,7 @@ * Copyright (C) 2004, 2005 Oracle. All rights reserved. */ +#include "linux/kstrtox.h" #include <linux/kernel.h> #include <linux/sched.h> #include <linux/jiffies.h> @@ -1020,7 +1021,7 @@ fire_callbacks: if (list_empty(&slot->ds_live_item)) goto out; - /* live nodes only go dead after enough consequtive missed + /* live nodes only go dead after enough consecutive missed * samples.. 
reset the missed counter whenever we see * activity */ if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) { @@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg, { unsigned long bytes; char *p = (char *)page; + int ret; - bytes = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &bytes); + if (ret) + return ret; /* Heartbeat and fs min / max block sizes are the same. */ if (bytes > 4096 || bytes < 512) @@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item, struct o2hb_region *reg = to_o2hb_region(item); unsigned long tmp; char *p = (char *)page; + int ret; if (reg->hr_bdev_file) return -EINVAL; - tmp = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &tmp); + if (ret) + return ret; if (tmp > O2NM_MAX_NODES || tmp == 0) return -ERANGE; @@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, if (o2nm_this_node() == O2NM_MAX_NODES) return -EINVAL; - fd = simple_strtol(p, &p, 0); - if (!p || (*p && (*p != '\n'))) + ret = kstrtol(p, 0, &fd); + if (ret < 0) return -EINVAL; if (fd < 0 || fd >= INT_MAX) @@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite { unsigned long tmp; char *p = (char *)page; + int ret; - tmp = simple_strtoul(p, &p, 10); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 10, &tmp); + if (ret) + return ret; /* this will validate ranges for us. */ o2hb_dead_threshold_set((unsigned int) tmp); diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index b73fc42e46ff..630bd5a3dd0d 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -29,7 +29,7 @@ * just calling printk() so that this can eventually make its way through * relayfs along with the debugging messages. Everything else gets KERN_DEBUG. * The inline tests and macro dance give GCC the opportunity to quite cleverly - * only emit the appropriage printk() when the caller passes in a constant + * only emit the appropriate printk() when the caller passes in a constant * mask, as is almost always the case. * * All this bitmask nonsense is managed from the files under diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 8bf17231d7b7..bfb8b456876c 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -23,7 +23,7 @@ * race between when we see a node start heartbeating and when we connect * to it. * - * So nodes that are in this transtion put a hold on the quorum decision + * So nodes that are in this transition put a hold on the quorum decision * with a counter. As they fall out of this transition they drop the count * and if they're the last, they fire off the decision. */ @@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node) } /* as a node comes up we delay the quorum decision until we know the fate of - * the connection. the hold will be droped in conn_up or hb_down. it might be + * the connection. the hold will be dropped in conn_up or hb_down. it might be * perpetuated by con_err until hb_down. if we already have a conn, we might * be dropping a hold that conn_up got. */ void o2quo_hb_up(u8 node) @@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node) } /* This is analogous to hb_up. as a node's connection comes up we delay the - * quorum decision until we see it heartbeating. 
the hold will be droped in + * quorum decision until we see it heartbeating. the hold will be dropped in * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if * it's already heartbeating we might be dropping a hold that conn_up got. * */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2b8fa3e782fb..0f46b22561d6 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -5,13 +5,13 @@ * * ---- * - * Callers for this were originally written against a very simple synchronus + * Callers for this were originally written against a very simple synchronous * API. This implementation reflects those simple callers. Some day I'm sure * we'll need to move to a more robust posting/callback mechanism. * * Transmit calls pass in kernel virtual addresses and block copying this into * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting - * for a failed socket to timeout. TX callers can also pass in a poniter to an + * for a failed socket to timeout. TX callers can also pass in a pointer to an * 'int' which gets filled with an errno off the wire in response to the * message they send. * @@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock; * o2net_wq. teardown detaches the callbacks before destroying the workqueue. * quorum work is queued as sock containers are shutdown.. stop_listening * tears down all the node's sock containers, preventing future shutdowns - * and queued quroum work, before canceling delayed quorum work and + * and queued quorum work, before canceling delayed quorum work and * destroying the work queue. */ static struct workqueue_struct *o2net_wq; @@ -1419,7 +1419,7 @@ out: return ret; } -/* this work func is triggerd by data ready. it reads until it can read no +/* this work func is triggered by data ready. it reads until it can read no * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing * our work the work struct will be marked and we'll be called again. 
*/ static void o2net_rx_until_empty(struct work_struct *work) diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index a9b8688aaf30..1873bbbb7e5b 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -32,7 +32,8 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry) } -static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) +static int ocfs2_dentry_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct inode *inode; int ret = 0; /* if all else fails, just return false */ @@ -44,8 +45,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) inode = d_inode(dentry); osb = OCFS2_SB(dentry->d_sb); - trace_ocfs2_dentry_revalidate(dentry, dentry->d_name.len, - dentry->d_name.name); + trace_ocfs2_dentry_revalidate(dentry, name->len, name->name); /* For a negative dentry - * check the generation number of the parent and compare with the @@ -53,12 +53,8 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags) */ if (inode == NULL) { unsigned long gen = (unsigned long) dentry->d_fsdata; - unsigned long pgen; - spin_lock(&dentry->d_lock); - pgen = OCFS2_I(d_inode(dentry->d_parent))->ip_dir_lock_gen; - spin_unlock(&dentry->d_lock); - trace_ocfs2_dentry_revalidate_negative(dentry->d_name.len, - dentry->d_name.name, + unsigned long pgen = OCFS2_I(dir)->ip_dir_lock_gen; + trace_ocfs2_dentry_revalidate_negative(name->len, name->name, pgen, gen); if (gen != pgen) goto bail; diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 213206ebdd58..7799f4d16ce9 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen, { struct buffer_head *bh; struct ocfs2_dir_entry *res_dir = NULL; + int ret = 0; if (ocfs2_dir_indexed(dir)) return ocfs2_find_entry_dx(name, namelen, dir, lookup); + if (unlikely(i_size_read(dir) <= 0)) { + ret = -EFSCORRUPTED; + mlog_errno(ret); + goto out; + } /* * The unindexed dir code only uses part of the lookup * structure, so there's no reason to push it down further * than this. 
*/ - if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) { + ret = -EFSCORRUPTED; + mlog_errno(ret); + goto out; + } bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir); - else + } else { bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir); + } if (bh == NULL) return -ENOENT; lookup->dl_leaf_bh = bh; lookup->dl_entry = res_dir; - return 0; +out: + return ret; } /* @@ -2010,6 +2023,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, * * Return 0 if the name does not exist * Return -EEXIST if the directory contains the name + * Return -EFSCORRUPTED if found corruption * * Callers should have i_rwsem + a cluster lock on dir */ @@ -2023,9 +2037,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir, trace_ocfs2_check_dir_for_entry( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); - if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) { + ret = ocfs2_find_entry(name, namelen, dir, &lookup); + if (ret == 0) { ret = -EEXIST; mlog_errno(ret); + } else if (ret == -ENOENT) { + ret = 0; } ocfs2_free_dir_lookup_result(&lookup); diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index 847a52dcbe7d..1969db8ffa9c 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -118,7 +118,7 @@ struct dlm_lockstatus { #define LKM_VALBLK 0x00000100 /* lock value block request */ #define LKM_NOQUEUE 0x00000200 /* non blocking request */ #define LKM_CONVERT 0x00000400 /* conversion request */ -#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */ +#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */ #define LKM_UNLOCK 0x00001000 /* deallocate this lock */ #define LKM_CANCEL 0x00002000 /* cancel conversion request */ #define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */ diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index e9ef4e2b0e75..fe4fdd09bae3 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c @@ -14,6 +14,7 @@ #include <linux/spinlock.h> #include <linux/debugfs.h> #include <linux/export.h> +#include <linux/string_choices.h> #include "../cluster/heartbeat.h" #include "../cluster/nodemanager.h" @@ -90,12 +91,12 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res) buf, res->owner, res->state); printk(" last used: %lu, refcnt: %u, on purge list: %s\n", res->last_used, kref_read(&res->refs), - list_empty(&res->purge) ? "no" : "yes"); + str_no_yes(list_empty(&res->purge))); printk(" on dirty list: %s, on reco list: %s, " "migrating pending: %s\n", - list_empty(&res->dirty) ? "no" : "yes", - list_empty(&res->recovering) ? "no" : "yes", - res->migration_pending ? 
"yes" : "no"); + str_no_yes(list_empty(&res->dirty)), + str_no_yes(list_empty(&res->recovering)), + str_yes_no(res->migration_pending)); printk(" inflight locks: %d, asts reserved: %d\n", res->inflight_locks, atomic_read(&res->asts_reserved)); dlm_print_lockres_refmap(res); diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index d610da8e2f24..86bb1a03bcc1 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -21,7 +21,7 @@ #include <linux/inet.h> #include <linux/spinlock.h> #include <linux/delay.h> - +#include <linux/string_choices.h> #include "../cluster/heartbeat.h" #include "../cluster/nodemanager.h" @@ -2859,7 +2859,7 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, dlm_lockres_release_ast(dlm, res); mlog(0, "about to wait on migration_wq, dirty=%s\n", - res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); + str_yes_no(res->state & DLM_LOCK_RES_DIRTY)); /* if the extra ref we just put was the final one, this * will pass thru immediately. otherwise, we need to wait * for the last ast to finish. */ @@ -2869,12 +2869,12 @@ again: msecs_to_jiffies(1000)); if (ret < 0) { mlog(0, "woken again: migrating? %s, dead? %s\n", - res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", - test_bit(target, dlm->domain_map) ? "no":"yes"); + str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), + str_no_yes(test_bit(target, dlm->domain_map))); } else { mlog(0, "all is well: migrating? %s, dead? %s\n", - res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", - test_bit(target, dlm->domain_map) ? "no":"yes"); + str_yes_no(res->state & DLM_LOCK_RES_MIGRATING), + str_no_yes(test_bit(target, dlm->domain_map))); } if (!dlm_migration_can_proceed(dlm, res, target)) { mlog(0, "trying again...\n"); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 50da8af988c1..67fc62a49a76 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -22,7 +22,7 @@ #include <linux/timer.h> #include <linux/kthread.h> #include <linux/delay.h> - +#include <linux/string_choices.h> #include "../cluster/heartbeat.h" #include "../cluster/nodemanager.h" @@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) * 1) all recovery threads cluster wide will work on recovering * ONE node at a time * 2) negotiate who will take over all the locks for the dead node. - * thats right... ALL the locks. + * that's right... ALL the locks. * 3) once a new master is chosen, everyone scans all locks * and moves aside those mastered by the dead guy * 4) each of these locks should be locked until recovery is done @@ -581,8 +581,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) msecs_to_jiffies(1000)); mlog(0, "waited 1 sec for %u, " "dead? %s\n", ndata->node_num, - dlm_is_node_dead(dlm, ndata->node_num) ? - "yes" : "no"); + str_yes_no(dlm_is_node_dead(dlm, ndata->node_num))); } else { /* -ENOMEM on the other node */ mlog(0, "%s: node %u returned " @@ -677,7 +676,7 @@ static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node) spin_unlock(&dlm_reco_state_lock); mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass, - all_nodes_done?"yes":"no"); + str_yes_no(all_nodes_done)); if (all_nodes_done) { int ret; @@ -1469,7 +1468,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, * The first one is handled at the end of this function. The * other two are handled in the worker thread after locks have * been attached. Yes, we don't wait for purge time to match - * kref_init. The lockres will still have atleast one ref + * kref_init. 
The lockres will still have at least one ref * added because it is in the hash __dlm_insert_lockres() */ extra_refs++; @@ -1735,7 +1734,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&res->spinlock); } } else { - /* put.. incase we are not the master */ + /* put.. in case we are not the master */ spin_unlock(&res->spinlock); dlm_lockres_put(res); } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 7fc0e920eda7..2a7f36643895 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/fs.h> +#include <linux/fs_context.h> #include <linux/pagemap.h> #include <linux/types.h> #include <linux/slab.h> @@ -506,9 +507,7 @@ bail: return status; } -static int dlmfs_fill_super(struct super_block * sb, - void * data, - int silent) +static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc) { sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; @@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = { .setattr = dlmfs_file_setattr, }; -static struct dentry *dlmfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int dlmfs_get_tree(struct fs_context *fc) { - return mount_nodev(fs_type, flags, data, dlmfs_fill_super); + return get_tree_nodev(fc, dlmfs_fill_super); +} + +static const struct fs_context_operations dlmfs_context_ops = { + .get_tree = dlmfs_get_tree, +}; + +static int dlmfs_init_fs_context(struct fs_context *fc) +{ + fc->ops = &dlmfs_context_ops; + + return 0; } static struct file_system_type dlmfs_fs_type = { .owner = THIS_MODULE, .name = "ocfs2_dlmfs", - .mount = dlmfs_mount, .kill_sb = kill_litter_super, + .init_fs_context = dlmfs_init_fs_context, }; MODULE_ALIAS_FS("ocfs2_dlmfs"); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 764ecbd5ad41..92a6149da9c1 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -19,6 +19,7 @@ #include <linux/delay.h> #include <linux/quotaops.h> #include <linux/sched/signal.h> +#include <linux/string_choices.h> #define MLOG_MASK_PREFIX ML_DLM_GLUE #include <cluster/masklog.h> @@ -794,7 +795,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res) /* * Keep a list of processes who have interest in a lockres. - * Note: this is now only uesed for check recursive cluster locking. + * Note: this is now only used for check recursive cluster locking. */ static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres, struct ocfs2_lock_holder *oh) @@ -2529,30 +2530,28 @@ bail: /* * This is working around a lock inversion between tasks acquiring DLM - * locks while holding a page lock and the downconvert thread which - * blocks dlm lock acquiry while acquiring page locks. + * locks while holding a folio lock and the downconvert thread which + * blocks dlm lock acquiry while acquiring folio locks. * - * ** These _with_page variantes are only intended to be called from aop - * methods that hold page locks and return a very specific *positive* error + * ** These _with_folio variants are only intended to be called from aop + * methods that hold folio locks and return a very specific *positive* error * code that aop methods pass up to the VFS -- test for errors with != 0. ** * * The DLM is called such that it returns -EAGAIN if it would have * blocked waiting for the downconvert thread. In that case we unlock - * our page so the downconvert thread can make progress. 
Once we've + * our folio so the downconvert thread can make progress. Once we've * done this we have to return AOP_TRUNCATED_PAGE so the aop method * that called us can bubble that back up into the VFS who will then * immediately retry the aop call. */ -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page) +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio) { int ret; ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { - unlock_page(page); + folio_unlock(folio); /* * If we can't get inode lock immediately, we should not return * directly here, since this will lead to a softlockup problem. @@ -2630,7 +2629,7 @@ void ocfs2_inode_unlock(struct inode *inode, } /* - * This _tracker variantes are introduced to deal with the recursive cluster + * This _tracker variants are introduced to deal with the recursive cluster * locking issue. The idea is to keep track of a lock holder on the stack of * the current process. If there's a lock holder on the stack, we know the * task context is already protected by cluster locking. Currently, they're @@ -2735,7 +2734,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres; - /* had_lock means that the currect process already takes the cluster + /* had_lock means that the current process already takes the cluster * lock previously. * If had_lock is 1, we have nothing to do here. * If had_lock is 0, we will release the lock. @@ -3802,9 +3801,9 @@ recheck: * set when the ast is received for an upconvert just before the * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast * on the heels of the ast, we want to delay the downconvert just - * enough to allow the up requestor to do its task. Because this + * enough to allow the up requester to do its task. Because this * lock is in the blocked queue, the lock will be downconverted - * as soon as the requestor is done with the lock. + * as soon as the requester is done with the lock. */ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) goto leave_requeue; @@ -4339,7 +4338,7 @@ unqueue: ocfs2_schedule_blocked_lock(osb, lockres); mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name, - ctl.requeue ? 
"yes" : "no"); + str_yes_no(ctl.requeue)); spin_unlock_irqrestore(&lockres->l_lock, flags); if (ctl.unblock_action != UNBLOCK_CONTINUE diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index e5da5809ed95..a3ebd7303ea2 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, int ex, int arg_flags, int subclass); -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page); +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio); /* Variants without special locking class or flags */ #define ocfs2_inode_lock_full(i, r, e, f)\ ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL) diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index f7672472fa82..930150ed5db1 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -435,6 +435,16 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, } } + if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { + ocfs2_error(inode->i_sb, + "Inode %lu has an invalid extent (next_free_rec %u, count %u)\n", + inode->i_ino, + le16_to_cpu(el->l_next_free_rec), + le16_to_cpu(el->l_count)); + ret = -EROFS; + goto out; + } + i = ocfs2_search_extent_list(el, v_cluster); if (i == -1) { /* diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 957ced628eb1..e54f2c4b5a90 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, goto out_commit_trans; } - /* Get the offsets within the page that we want to zero */ - zero_from = abs_from & (PAGE_SIZE - 1); - zero_to = abs_to & (PAGE_SIZE - 1); + /* Get the offsets within the folio that we want to zero */ + zero_from = offset_in_folio(folio, abs_from); + zero_to = offset_in_folio(folio, abs_to); if (!zero_to) - zero_to = PAGE_SIZE; + zero_to = folio_size(folio); trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 2cc5c99fe941..12e5d1f73325 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -200,6 +200,20 @@ bail: return inode; } +static int ocfs2_dinode_has_extents(struct ocfs2_dinode *di) +{ + /* inodes flagged with other stuff in id2 */ + if (di->i_flags & (OCFS2_SUPER_BLOCK_FL | OCFS2_LOCAL_ALLOC_FL | + OCFS2_CHAIN_FL | OCFS2_DEALLOC_FL)) + return 0; + /* i_flags doesn't indicate when id2 is a fast symlink */ + if (S_ISLNK(di->i_mode) && di->i_size && di->i_clusters == 0) + return 0; + if (di->i_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + return 1; +} /* * here's how inodes get read from disk: @@ -1122,7 +1136,7 @@ static void ocfs2_clear_inode(struct inode *inode) dquot_drop(inode); - /* To preven remote deletes we hold open lock before, now it + /* To prevent remote deletes we hold open lock before, now it * is time to unlock PR and EX open locks. */ ocfs2_open_unlock(inode); @@ -1437,7 +1451,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb, * Call ocfs2_validate_meta_ecc() first since it has ecc repair * function, but we should not return error immediately when ecc * validation fails, because the reason is quite likely the invalid - * inode number inputed. + * inode number inputted. 
*/ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); if (rc) { @@ -1547,6 +1561,16 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, le32_to_cpu(di->i_fs_generation)); } + if (ocfs2_dinode_has_extents(di) && + le16_to_cpu(di->id2.i_list.l_next_free_rec) > le16_to_cpu(di->id2.i_list.l_count)) { + di->id2.i_list.l_next_free_rec = di->id2.i_list.l_count; + changed = 1; + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: l_next_free_rec to %u\n", + (unsigned long long)bh->b_blocknr, + le16_to_cpu(di->id2.i_list.l_next_free_rec)); + } + if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) { ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check); mark_buffer_dirty(bh); diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 71beef7f8a60..7ae96fb8807a 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -796,7 +796,7 @@ bail: /* * OCFS2_IOC_INFO handles an array of requests passed from userspace. * - * ocfs2_info_handle() recevies a large info aggregation, grab and + * ocfs2_info_handle() receives a large info aggregation, grab and * validate the request count from header, then break it into small * pieces, later specific handlers can handle them one by one. * diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 1bf188b6866a..f1b4b3e611cb 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1956,7 +1956,7 @@ bail: /* * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some - * randomness to the timeout to minimize multple nodes firing the timer at the + * randomness to the timeout to minimize multiple nodes firing the timer at the * same time. */ static inline unsigned long ocfs2_orphan_scan_timeout(void) diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 6ef4cb045ccd..6a314e9f2b49 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf) } static vm_fault_t __ocfs2_page_mkwrite(struct file *file, - struct buffer_head *di_bh, struct page *page) + struct buffer_head *di_bh, struct folio *folio) { int err; vm_fault_t ret = VM_FAULT_NOPAGE; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; - loff_t pos = page_offset(page); + loff_t pos = folio_pos(folio); unsigned int len = PAGE_SIZE; pgoff_t last_index; struct folio *locked_folio = NULL; @@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * * Let VM retry with these cases. */ - if ((page->mapping != inode->i_mapping) || - (!PageUptodate(page)) || - (page_offset(page) >= size)) + if ((folio->mapping != inode->i_mapping) || + !folio_test_uptodate(folio) || + (pos >= size)) goto out; /* @@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * worry about ocfs2_write_begin() skipping some buffer reads * because the "write" would invalidate their data. 
*/ - if (page->index == last_index) + if (folio->index == last_index) len = ((size - 1) & ~PAGE_MASK) + 1; err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, - &locked_folio, &fsdata, di_bh, page); + &locked_folio, &fsdata, di_bh, folio); if (err) { if (err != -ENOSPC) mlog_errno(err); @@ -112,7 +112,7 @@ out: static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct buffer_head *di_bh = NULL; sigset_t oldset; @@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) */ down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); + ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio); up_write(&OCFS2_I(inode)->ip_alloc_sem); diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f9d6a4f9ca92..369c7d27befd 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, bg = (struct ocfs2_group_desc *)gd_bh->b_data; /* - * moving goal is not allowd to start with a group desc blok(#0 blk) + * moving goal is not allowed to start with a group desc blok(#0 blk) * let's compromise to the latter cluster. */ if (range->me_goal == le64_to_cpu(bg->bg_blkno)) @@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, /* * probe the victim cluster group to find a proper - * region to fit wanted movement, it even will perfrom + * region to fit wanted movement, it even will perform * a best-effort attempt by compromising to a threshold * around the goal. */ @@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) } /* - * rememer ip_xattr_sem also needs to be held if necessary + * remember ip_xattr_sem also needs to be held if necessary */ down_write(&OCFS2_I(inode)->ip_alloc_sem); @@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->range = ⦥ /* - * ok, the default theshold for the defragmentation + * ok, the default threshold for the defragmentation * is 1M, since our maximum clustersize was 1M also. * any thought? */ diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 5550f8afa438..0ec63a1a94b8 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -508,7 +508,6 @@ static int __ocfs2_mknod_locked(struct inode *dir, struct inode *inode, dev_t dev, struct buffer_head **new_fe_bh, - struct buffer_head *parent_fe_bh, handle_t *handle, struct ocfs2_alloc_context *inode_ac, u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) @@ -641,8 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, } return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, - parent_fe_bh, handle, inode_ac, - fe_blkno, suballoc_loc, suballoc_bit); + handle, inode_ac, fe_blkno, + suballoc_loc, suballoc_bit); } static int ocfs2_mkdir(struct mnt_idmap *idmap, @@ -2576,7 +2575,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, clear_nlink(inode); /* do the real work now. */ status = __ocfs2_mknod_locked(dir, inode, - 0, &new_di_bh, parent_di_bh, handle, + 0, &new_di_bh, handle, inode_ac, di_blkno, suballoc_loc, suballoc_bit); if (status < 0) { diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index c93689b568fe..e8e94599e907 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -132,7 +132,7 @@ * well as the name of the cluster being joined. 
* mount.ocfs2 must pass in a matching stack name. * - * If not set, the classic stack will be used. This is compatbile with + * If not set, the classic stack will be used. This is compatible with * all older versions. */ #define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080 @@ -143,7 +143,7 @@ /* Support for extended attributes */ #define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200 -/* Support for indexed directores */ +/* Support for indexed directories */ #define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400 /* Metadata checksum and error correction */ @@ -156,7 +156,7 @@ #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000 /* - * Incompat bit to indicate useable clusterinfo with stackflags for all + * Incompat bit to indicate usable clusterinfo with stackflags for all * cluster stacks (userspace adnd o2cb). If this bit is set, * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set. */ @@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block { struct ocfs2_xattr_header xb_header; /* xattr header if this block contains xattr */ struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this - block cotains xattr + block contains xattr tree. */ } xb_attrs; }; diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 9680797bc531..2de2f8733283 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h @@ -215,7 +215,7 @@ struct ocfs2_move_extents { movement less likely to fail, may make fs even more fragmented */ -#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation +#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation completely gets done. */ diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index 8ac357ce6a30..9b234c03d693 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = { [OCFS2_LOCK_TYPE_DATA] = "Data", [OCFS2_LOCK_TYPE_SUPER] = "Super", [OCFS2_LOCK_TYPE_RENAME] = "Rename", - /* Need to differntiate from [R]ename.. serializing writes is the + /* Need to differentiate from [R]ename.. serializing writes is the * important job it does, anyway. 
*/ [OCFS2_LOCK_TYPE_RW] = "Write/Read", [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index 0511c69c9fde..54ed1495de9a 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h @@ -1658,34 +1658,34 @@ TRACE_EVENT(ocfs2_remount, ); TRACE_EVENT(ocfs2_fill_super, - TP_PROTO(void *sb, void *data, int silent), - TP_ARGS(sb, data, silent), + TP_PROTO(void *sb, void *fc, int silent), + TP_ARGS(sb, fc, silent), TP_STRUCT__entry( __field(void *, sb) - __field(void *, data) + __field(void *, fc) __field(int, silent) ), TP_fast_assign( __entry->sb = sb; - __entry->data = data; + __entry->fc = fc; __entry->silent = silent; ), TP_printk("%p %p %d", __entry->sb, - __entry->data, __entry->silent) + __entry->fc, __entry->silent) ); TRACE_EVENT(ocfs2_parse_options, - TP_PROTO(int is_remount, char *options), - TP_ARGS(is_remount, options), + TP_PROTO(int is_remount, const char *option), + TP_ARGS(is_remount, option), TP_STRUCT__entry( __field(int, is_remount) - __string(options, options) + __string(option, option) ), TP_fast_assign( __entry->is_remount = is_remount; - __assign_str(options); + __assign_str(option); ), - TP_printk("%d %s", __entry->is_remount, __get_str(options)) + TP_printk("%d %s", __entry->is_remount, __get_str(option)) ); DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super); diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 3404e7a30c33..15d9acd456ec 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot) handle = ocfs2_start_trans(osb, ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); if (IS_ERR(handle)) { + /* + * Mark dquot as inactive to avoid endless cycle in + * quota_release_workfn(). + */ + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); status = PTR_ERR(handle); mlog_errno(status); goto out_ilock; diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 004393b13c0a..8f732742b26e 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, * * If we will insert a new one, this is easy and only happens * during adding refcounted flag to the extent, so we don't - * have a chance of spliting. We just need one record. + * have a chance of splitting. We just need one record. * * If the refcount rec already exists, that would be a little * complicated. we may have to: @@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, /* * Calculate out the start and number of virtual clusters we need to CoW. * - * cpos is vitual start cluster position we want to do CoW in a + * cpos is virtual start cluster position we want to do CoW in a * file and write_len is the cluster length. * max_cpos is the place where we want to stop CoW intentionally. * - * Normal we will start CoW from the beginning of extent record cotaining cpos. + * Normal we will start CoW from the beginning of extent record containing cpos. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we * get good I/O from the resulting extent tree. 
*/ @@ -2902,7 +2902,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, int ret = 0, partial; struct super_block *sb = inode->i_sb; u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); - struct page *page; pgoff_t page_index; unsigned int from, to; loff_t offset, end, map_end; @@ -2921,6 +2920,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, end = i_size_read(inode); while (offset < end) { + struct folio *folio; page_index = offset >> PAGE_SHIFT; map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) @@ -2933,9 +2933,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, to = map_end & (PAGE_SIZE - 1); retry: - page = find_or_create_page(mapping, page_index, GFP_NOFS); - if (!page) { - ret = -ENOMEM; + folio = __filemap_get_folio(mapping, page_index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); mlog_errno(ret); break; } @@ -2945,9 +2946,9 @@ retry: * page, so write it back. */ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { - if (PageDirty(page)) { - unlock_page(page); - put_page(page); + if (folio_test_dirty(folio)) { + folio_unlock(folio); + folio_put(folio); ret = filemap_write_and_wait_range(mapping, offset, map_end - 1); @@ -2955,9 +2956,7 @@ retry: } } - if (!PageUptodate(page)) { - struct folio *folio = page_folio(page); - + if (!folio_test_uptodate(folio)) { ret = block_read_full_folio(folio, ocfs2_get_block); if (ret) { mlog_errno(ret); @@ -2966,8 +2965,8 @@ retry: folio_lock(folio); } - if (page_has_buffers(page)) { - ret = walk_page_buffers(handle, page_buffers(page), + if (folio_buffers(folio)) { + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_clear_cow_buffer); if (ret) { @@ -2976,14 +2975,12 @@ retry: } } - ocfs2_map_and_dirty_page(inode, - handle, from, to, - page, 0, &new_block); - mark_page_accessed(page); + ocfs2_map_and_dirty_folio(inode, handle, from, to, + folio, 0, &new_block); + folio_mark_accessed(folio); unlock: - unlock_page(page); - put_page(page); - page = NULL; + folio_unlock(folio); + folio_put(folio); offset = map_end; if (ret) break; diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h index ec8101ef5717..4fce17180342 100644 --- a/fs/ocfs2/reservations.h +++ b/fs/ocfs2/reservations.h @@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation { #define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */ #define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be - * destroyed immedately after use */ + * destroyed immediately after use */ #define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed * directory btree */ @@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, /** * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used. * @resmap: reservations bitmap - * @resv: optional reservation to recalulate based on new bitmap + * @resv: optional reservation to recalculate based on new bitmap * @cstart: start of allocation in clusters * @clen: end of allocation in clusters. * diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 10157d9d7a9c..f58e891aa2da 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) } /* - * o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB + * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB * contents, it will zero out the LVB. Thus the caller can always trust * the contents. 
*/ diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 20aa37b67cfb..ddd761cf44c8 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -650,7 +650,7 @@ error: * and easier to preserve the name. */ -static struct ctl_table ocfs2_nm_table[] = { +static const struct ctl_table ocfs2_nm_table[] = { { .procname = "hb_ctl_path", .data = ocfs2_hb_ctl_path, diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 02ab072c528a..5486a6dce70a 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -210,7 +210,7 @@ struct ocfs2_stack_operations { struct file_lock *fl); /* - * This is an optoinal debugging hook. If provided, the + * This is an optional debugging hook. If provided, the * stack can dump debugging information about this lock. */ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c79b4291777f..8bb5022f3082 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -19,10 +19,10 @@ #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> -#include <linux/parser.h> +#include <linux/fs_parser.h> +#include <linux/fs_context.h> #include <linux/crc32.h> #include <linux/debugfs.h> -#include <linux/mount.h> #include <linux/seq_file.h> #include <linux/quotaops.h> #include <linux/signal.h> @@ -80,17 +80,15 @@ struct mount_options unsigned int resv_level; int dir_resv_level; char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; + bool user_stack; }; -static int ocfs2_parse_options(struct super_block *sb, char *options, - struct mount_options *mopt, - int is_remount); +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options); static int ocfs2_show_options(struct seq_file *s, struct dentry *root); static void ocfs2_put_super(struct super_block *sb); static int ocfs2_mount_volume(struct super_block *sb); -static int ocfs2_remount(struct super_block *sb, int *flags, char *data); static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err); static int ocfs2_initialize_mem_caches(void); static void ocfs2_free_mem_caches(void); @@ -135,7 +133,6 @@ static const struct super_operations ocfs2_sops = { .evict_inode = ocfs2_evict_inode, .sync_fs = ocfs2_sync_fs, .put_super = ocfs2_put_super, - .remount_fs = ocfs2_remount, .show_options = ocfs2_show_options, .quota_read = ocfs2_quota_read, .quota_write = ocfs2_quota_write, @@ -144,15 +141,10 @@ static const struct super_operations ocfs2_sops = { enum { Opt_barrier, - Opt_err_panic, - Opt_err_ro, + Opt_errors, Opt_intr, - Opt_nointr, - Opt_hb_none, - Opt_hb_local, - Opt_hb_global, - Opt_data_ordered, - Opt_data_writeback, + Opt_heartbeat, + Opt_data, Opt_atime_quantum, Opt_slot, Opt_commit, @@ -160,52 +152,64 @@ enum { Opt_localflocks, Opt_stack, Opt_user_xattr, - Opt_nouser_xattr, Opt_inode64, Opt_acl, - Opt_noacl, Opt_usrquota, Opt_grpquota, - Opt_coherency_buffered, - Opt_coherency_full, + Opt_coherency, Opt_resv_level, Opt_dir_resv_level, Opt_journal_async_commit, - Opt_err_cont, - Opt_err, }; -static const match_table_t tokens = { - {Opt_barrier, "barrier=%u"}, - {Opt_err_panic, "errors=panic"}, - {Opt_err_ro, "errors=remount-ro"}, - {Opt_intr, "intr"}, - {Opt_nointr, "nointr"}, - {Opt_hb_none, OCFS2_HB_NONE}, - {Opt_hb_local, OCFS2_HB_LOCAL}, - {Opt_hb_global, OCFS2_HB_GLOBAL}, - {Opt_data_ordered, "data=ordered"}, - {Opt_data_writeback, "data=writeback"}, - {Opt_atime_quantum, "atime_quantum=%u"}, - {Opt_slot, "preferred_slot=%u"}, - 
{Opt_commit, "commit=%u"}, - {Opt_localalloc, "localalloc=%d"}, - {Opt_localflocks, "localflocks"}, - {Opt_stack, "cluster_stack=%s"}, - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_inode64, "inode64"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_usrquota, "usrquota"}, - {Opt_grpquota, "grpquota"}, - {Opt_coherency_buffered, "coherency=buffered"}, - {Opt_coherency_full, "coherency=full"}, - {Opt_resv_level, "resv_level=%u"}, - {Opt_dir_resv_level, "dir_resv_level=%u"}, - {Opt_journal_async_commit, "journal_async_commit"}, - {Opt_err_cont, "errors=continue"}, - {Opt_err, NULL} +static const struct constant_table ocfs2_param_errors[] = { + {"panic", OCFS2_MOUNT_ERRORS_PANIC}, + {"remount-ro", OCFS2_MOUNT_ERRORS_ROFS}, + {"continue", OCFS2_MOUNT_ERRORS_CONT}, + {} +}; + +static const struct constant_table ocfs2_param_heartbeat[] = { + {"local", OCFS2_MOUNT_HB_LOCAL}, + {"none", OCFS2_MOUNT_HB_NONE}, + {"global", OCFS2_MOUNT_HB_GLOBAL}, + {} +}; + +static const struct constant_table ocfs2_param_data[] = { + {"writeback", OCFS2_MOUNT_DATA_WRITEBACK}, + {"ordered", 0}, + {} +}; + +static const struct constant_table ocfs2_param_coherency[] = { + {"buffered", OCFS2_MOUNT_COHERENCY_BUFFERED}, + {"full", 0}, + {} +}; + +static const struct fs_parameter_spec ocfs2_param_spec[] = { + fsparam_u32 ("barrier", Opt_barrier), + fsparam_enum ("errors", Opt_errors, ocfs2_param_errors), + fsparam_flag_no ("intr", Opt_intr), + fsparam_enum ("heartbeat", Opt_heartbeat, ocfs2_param_heartbeat), + fsparam_enum ("data", Opt_data, ocfs2_param_data), + fsparam_u32 ("atime_quantum", Opt_atime_quantum), + fsparam_u32 ("preferred_slot", Opt_slot), + fsparam_u32 ("commit", Opt_commit), + fsparam_s32 ("localalloc", Opt_localalloc), + fsparam_flag ("localflocks", Opt_localflocks), + fsparam_string ("cluster_stack", Opt_stack), + fsparam_flag_no ("user_xattr", Opt_user_xattr), + fsparam_flag ("inode64", Opt_inode64), + fsparam_flag_no ("acl", Opt_acl), + fsparam_flag ("usrquota", Opt_usrquota), + fsparam_flag ("grpquota", Opt_grpquota), + fsparam_enum ("coherency", Opt_coherency, ocfs2_param_coherency), + fsparam_u32 ("resv_level", Opt_resv_level), + fsparam_u32 ("dir_resv_level", Opt_dir_resv_level), + fsparam_flag ("journal_async_commit", Opt_journal_async_commit), + {} }; #ifdef CONFIG_DEBUG_FS @@ -600,32 +604,32 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits, return (((unsigned long long)bytes) << bitshift) - trim; } -static int ocfs2_remount(struct super_block *sb, int *flags, char *data) +static int ocfs2_reconfigure(struct fs_context *fc) { int incompat_features; int ret = 0; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; + struct super_block *sb = fc->root->d_sb; struct ocfs2_super *osb = OCFS2_SB(sb); u32 tmp; sync_filesystem(sb); - if (!ocfs2_parse_options(sb, data, &parsed_options, 1) || - !ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { ret = -EINVAL; goto out; } tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; - if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { + if ((osb->s_mount_opt & tmp) != (parsed_options->mount_opt & tmp)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); goto out; } if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != - (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { + (parsed_options->mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { ret = -EINVAL; 
mlog(ML_ERROR, "Cannot change data mode on remount\n"); goto out; @@ -634,16 +638,16 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) /* Probably don't want this on remount; it might * mess with other nodes */ if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) && - (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) { + (parsed_options->mount_opt & OCFS2_MOUNT_INODE64)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); goto out; } /* We're going to/from readonly mode. */ - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { + if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { /* Disable quota accounting before remounting RO */ - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { ret = ocfs2_susp_quotas(osb, 0); if (ret < 0) goto out; @@ -657,7 +661,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) goto unlock_osb; } - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { sb->s_flags |= SB_RDONLY; osb->osb_flags |= OCFS2_OSB_SOFT_RO; } else { @@ -678,11 +682,11 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) sb->s_flags &= ~SB_RDONLY; osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; } - trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); + trace_ocfs2_remount(sb->s_flags, osb->osb_flags, fc->sb_flags); unlock_osb: spin_unlock(&osb->osb_lock); /* Enable quota accounting after remounting RW */ - if (!ret && !(*flags & SB_RDONLY)) { + if (!ret && !(fc->sb_flags & SB_RDONLY)) { if (sb_any_quota_suspended(sb)) ret = ocfs2_susp_quotas(osb, 1); else @@ -701,11 +705,11 @@ unlock_osb: if (!ret) { /* Only save off the new mount options in case of a successful * remount. */ - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - if (parsed_options.commit_interval) - osb->osb_commit_interval = parsed_options.commit_interval; + osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + if (parsed_options->commit_interval) + osb->osb_commit_interval = parsed_options->commit_interval; if (!ocfs2_is_hard_readonly(osb)) ocfs2_set_journal_params(osb); @@ -966,23 +970,18 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) } } -static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) +static int ocfs2_fill_super(struct super_block *sb, struct fs_context *fc) { struct dentry *root; int status, sector_size; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; struct inode *inode = NULL; struct ocfs2_super *osb = NULL; struct buffer_head *bh = NULL; char nodestr[12]; struct ocfs2_blockcheck_stats stats; - trace_ocfs2_fill_super(sb, data, silent); - - if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { - status = -EINVAL; - goto out; - } + trace_ocfs2_fill_super(sb, fc, fc->sb_flags & SB_SILENT); /* probe for superblock */ status = ocfs2_sb_probe(sb, &bh, §or_size, &stats); @@ -999,24 +998,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb = OCFS2_SB(sb); - if (!ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { status = -EINVAL; goto out_super; } - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - osb->osb_commit_interval = parsed_options.commit_interval; + 
osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + osb->osb_commit_interval = parsed_options->commit_interval; - ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt); - osb->osb_resv_level = parsed_options.resv_level; - osb->osb_dir_resv_level = parsed_options.resv_level; - if (parsed_options.dir_resv_level == -1) - osb->osb_dir_resv_level = parsed_options.resv_level; + ocfs2_la_set_sizes(osb, parsed_options->localalloc_opt); + osb->osb_resv_level = parsed_options->resv_level; + osb->osb_dir_resv_level = parsed_options->resv_level; + if (parsed_options->dir_resv_level == -1) + osb->osb_dir_resv_level = parsed_options->resv_level; else - osb->osb_dir_resv_level = parsed_options.dir_resv_level; + osb->osb_dir_resv_level = parsed_options->dir_resv_level; - status = ocfs2_verify_userspace_stack(osb, &parsed_options); + status = ocfs2_verify_userspace_stack(osb, parsed_options); if (status) goto out_super; @@ -1180,27 +1179,72 @@ out: return status; } -static struct dentry *ocfs2_mount(struct file_system_type *fs_type, - int flags, - const char *dev_name, - void *data) +static int ocfs2_get_tree(struct fs_context *fc) +{ + return get_tree_bdev(fc, ocfs2_fill_super); +} + +static void ocfs2_free_fc(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); + kfree(fc->fs_private); +} + +static const struct fs_context_operations ocfs2_context_ops = { + .parse_param = ocfs2_parse_param, + .get_tree = ocfs2_get_tree, + .reconfigure = ocfs2_reconfigure, + .free = ocfs2_free_fc, +}; + +static int ocfs2_init_fs_context(struct fs_context *fc) +{ + struct mount_options *mopt; + + mopt = kzalloc(sizeof(struct mount_options), GFP_KERNEL); + if (!mopt) + return -EINVAL; + + mopt->commit_interval = 0; + mopt->mount_opt = OCFS2_MOUNT_NOINTR; + mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + mopt->slot = OCFS2_INVALID_SLOT; + mopt->localalloc_opt = -1; + mopt->cluster_stack[0] = '\0'; + mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; + mopt->dir_resv_level = -1; + + fc->fs_private = mopt; + fc->ops = &ocfs2_context_ops; + + return 0; } static struct file_system_type ocfs2_fs_type = { .owner = THIS_MODULE, .name = "ocfs2", - .mount = ocfs2_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, - .next = NULL + .next = NULL, + .init_fs_context = ocfs2_init_fs_context, + .parameters = ocfs2_param_spec, }; MODULE_ALIAS_FS("ocfs2"); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options) { + if (options->user_stack == 0) { + u32 tmp; + + /* Ensure only one heartbeat mode */ + tmp = options->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + return 0; + } + } if (options->mount_opt & OCFS2_MOUNT_USRQUOTA && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { @@ -1232,241 +1276,142 @@ static int ocfs2_check_set_options(struct super_block *sb, return 1; } -static int ocfs2_parse_options(struct super_block *sb, - char *options, - struct mount_options *mopt, - int is_remount) +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param) { - int status, user_stack = 0; - char *p; - u32 tmp; - int token, option; - substring_t args[MAX_OPT_ARGS]; - - trace_ocfs2_parse_options(is_remount, options ? 
options : "(none)"); - - mopt->commit_interval = 0; - mopt->mount_opt = OCFS2_MOUNT_NOINTR; - mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; - mopt->slot = OCFS2_INVALID_SLOT; - mopt->localalloc_opt = -1; - mopt->cluster_stack[0] = '\0'; - mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; - mopt->dir_resv_level = -1; - - if (!options) { - status = 1; - goto bail; - } - - while ((p = strsep(&options, ",")) != NULL) { - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_hb_local: - mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL; - break; - case Opt_hb_none: - mopt->mount_opt |= OCFS2_MOUNT_HB_NONE; - break; - case Opt_hb_global: - mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL; - break; - case Opt_barrier: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->mount_opt |= OCFS2_MOUNT_BARRIER; - else - mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; - break; - case Opt_intr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; - break; - case Opt_nointr: + struct fs_parse_result result; + int opt; + struct mount_options *mopt = fc->fs_private; + bool is_remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE); + + trace_ocfs2_parse_options(is_remount, param->key); + + opt = fs_parse(fc, ocfs2_param_spec, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case Opt_heartbeat: + mopt->mount_opt |= result.uint_32; + break; + case Opt_barrier: + if (result.uint_32) + mopt->mount_opt |= OCFS2_MOUNT_BARRIER; + else + mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; + break; + case Opt_intr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOINTR; - break; - case Opt_err_panic: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; - break; - case Opt_err_ro: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS; - break; - case Opt_err_cont: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT; - break; - case Opt_data_ordered: - mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_data_writeback: - mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_user_xattr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_nouser_xattr: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; + break; + case Opt_errors: + mopt->mount_opt &= ~(OCFS2_MOUNT_ERRORS_CONT | + OCFS2_MOUNT_ERRORS_ROFS | + OCFS2_MOUNT_ERRORS_PANIC); + mopt->mount_opt |= result.uint_32; + break; + case Opt_data: + mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; + mopt->mount_opt |= result.uint_32; + break; + case Opt_user_xattr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_atime_quantum: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->atime_quantum = option; - break; - case Opt_slot: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->slot = (u16)option; - break; - case Opt_commit: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option < 0) - return 0; - if (option == 0) - option = JBD2_DEFAULT_MAX_COMMIT_AGE; - mopt->commit_interval = HZ * option; - break; - case Opt_localalloc: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->localalloc_opt = option; - break; - case Opt_localflocks: - /* - * 
Changing this during remount could race - * flock() requests, or "unbalance" existing - * ones (e.g., a lock is taken in one mode but - * dropped in the other). If users care enough - * to flip locking modes during remount, we - * could add a "local" flag to individual - * flock structures for proper tracking of - * state. - */ - if (!is_remount) - mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; - break; - case Opt_stack: - /* Check both that the option we were passed - * is of the right length and that it is a proper - * string of the right length. - */ - if (((args[0].to - args[0].from) != - OCFS2_STACK_LABEL_LEN) || - (strnlen(args[0].from, - OCFS2_STACK_LABEL_LEN) != - OCFS2_STACK_LABEL_LEN)) { - mlog(ML_ERROR, - "Invalid cluster_stack option\n"); - status = 0; - goto bail; - } - memcpy(mopt->cluster_stack, args[0].from, - OCFS2_STACK_LABEL_LEN); - mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; - /* - * Open code the memcmp here as we don't have - * an osb to pass to - * ocfs2_userspace_stack(). - */ - if (memcmp(mopt->cluster_stack, - OCFS2_CLASSIC_CLUSTER_STACK, - OCFS2_STACK_LABEL_LEN)) - user_stack = 1; - break; - case Opt_inode64: - mopt->mount_opt |= OCFS2_MOUNT_INODE64; - break; - case Opt_usrquota: - mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; - break; - case Opt_grpquota: - mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; - break; - case Opt_coherency_buffered: - mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_coherency_full: - mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_acl: - mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; - mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; - break; - case Opt_noacl: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; + break; + case Opt_atime_quantum: + mopt->atime_quantum = result.uint_32; + break; + case Opt_slot: + if (result.uint_32) + mopt->slot = (u16)result.uint_32; + break; + case Opt_commit: + if (result.uint_32 == 0) + mopt->commit_interval = HZ * JBD2_DEFAULT_MAX_COMMIT_AGE; + else + mopt->commit_interval = HZ * result.uint_32; + break; + case Opt_localalloc: + if (result.int_32 >= 0) + mopt->localalloc_opt = result.int_32; + break; + case Opt_localflocks: + /* + * Changing this during remount could race flock() requests, or + * "unbalance" existing ones (e.g., a lock is taken in one mode + * but dropped in the other). If users care enough to flip + * locking modes during remount, we could add a "local" flag to + * individual flock structures for proper tracking of state. + */ + if (!is_remount) + mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; + break; + case Opt_stack: + /* Check both that the option we were passed is of the right + * length and that it is a proper string of the right length. + */ + if (strlen(param->string) != OCFS2_STACK_LABEL_LEN) { + mlog(ML_ERROR, "Invalid cluster_stack option\n"); + return -EINVAL; + } + memcpy(mopt->cluster_stack, param->string, OCFS2_STACK_LABEL_LEN); + mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have an osb to pass + * to ocfs2_userspace_stack(). 
+ */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + mopt->user_stack = 1; + break; + case Opt_inode64: + mopt->mount_opt |= OCFS2_MOUNT_INODE64; + break; + case Opt_usrquota: + mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; + break; + case Opt_grpquota: + mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; + break; + case Opt_coherency: + mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; + mopt->mount_opt |= result.uint_32; + break; + case Opt_acl: + if (result.negated) { mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; + } else { + mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; + mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; + } + break; + case Opt_resv_level: + if (is_remount) break; - case Opt_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->resv_level = option; - break; - case Opt_dir_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->dir_resv_level = option; - break; - case Opt_journal_async_commit: - mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->resv_level = result.uint_32; + break; + case Opt_dir_resv_level: + if (is_remount) break; - default: - mlog(ML_ERROR, - "Unrecognized mount option \"%s\" " - "or missing value\n", p); - status = 0; - goto bail; - } - } - - if (user_stack == 0) { - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | - OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; - } + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->dir_resv_level = result.uint_32; + break; + case Opt_journal_async_commit: + mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; + break; + default: + return -EINVAL; } - status = 1; - -bail: - return status; + return 0; } static int ocfs2_show_options(struct seq_file *s, struct dentry *root) @@ -1858,7 +1803,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) osb = OCFS2_SB(sb); BUG_ON(!osb); - /* Remove file check sysfs related directores/files, + /* Remove file check sysfs related directories/files, * and wait for the pending file check operations */ ocfs2_filecheck_remove_sysfs(osb); @@ -2340,7 +2285,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, mlog(ML_ERROR, "found superblock with incorrect block " "size bits: found %u, should be 9, 10, 11, or 12\n", blksz_bits); - } else if ((1 << le32_to_cpu(blksz_bits)) != blksz) { + } else if ((1 << blksz_bits) != blksz) { mlog(ML_ERROR, "found superblock with incorrect block " "size: found %u, should be %u\n", 1 << blksz_bits, blksz); } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) != diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index d4c5fdcfa1e4..ad8be3300b49 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -54,31 +54,27 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct buffer_head *bh = NULL; int status = ocfs2_read_inode_block(inode, &bh); 
struct ocfs2_dinode *fe; const char *link; - void *kaddr; size_t len; if (status < 0) { mlog_errno(status); - return status; + goto out; } fe = (struct ocfs2_dinode *) bh->b_data; link = (char *) fe->id2.i_symlink; /* will be less than a page size */ len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb)); - kaddr = kmap_atomic(page); - memcpy(kaddr, link, len + 1); - kunmap_atomic(kaddr); - SetPageUptodate(page); - unlock_page(page); + memcpy_to_folio(folio, 0, link, len + 1); +out: + folio_end_read(folio, status == 0); brelse(bh); - return 0; + return status; } const struct address_space_operations ocfs2_fast_symlink_aops = { diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 73a6f6fd8a8e..d70a20d29e3e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir, * 256(name) + 80(value) + 16(entry) = 352 bytes, * The max space of acl xattr taken inline is * 80(value) + 16(entry) * 2(if directory) = 192 bytes, - * when blocksize = 512, may reserve one more cluser for + * when blocksize = 512, may reserve one more cluster for * xattr bucket, otherwise reserve one metadata block * for them is ok. * If this is a new directory with inline data, @@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b) /* * defrag a xattr bucket if we find that the bucket has some - * holes beteen name/value pairs. + * holes between name/value pairs. * We will move all the name/value pairs to the end of the bucket * so that we can spare some space for insertion. */ @@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode, * 2. If cluster_size == bucket_size: * a) If the previous extent rec has more than one cluster and the insert * place isn't in the last cluster, copy the entire last cluster to the - * new one. This time, we don't need to upate the first_bh and header_bh + * new one. This time, we don't need to update the first_bh and header_bh * since they will not be moved into the new cluster. * b) Otherwise, move the bottom half of the xattrs in the last cluster into * the new one. And we set the extend flag to zero if the insert place is @@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink { /* * Given a xattr header and xe offset, * return the proper xv and the corresponding bh. - * xattr in inode, block and xattr tree have different implementaions. + * xattr in inode, block and xattr tree have different implementations. */ typedef int (get_xattr_value_root)(struct super_block *sb, struct buffer_head *bh, @@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb, } /* - * Lock the meta_ac and caculate how much credits we need for reflink xattrs. + * Lock the meta_ac and calculate how much credits we need for reflink xattrs. * It is only used for inline xattr and xattr block. 
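The ocfs2_fast_symlink_read_folio() conversion above swaps the open-coded kmap_atomic()/SetPageUptodate()/unlock_page() sequence for memcpy_to_folio() plus folio_end_read(), which marks the folio up to date only on success and unlocks it in one call. A stripped-down sketch of that ->read_folio pattern follows; example_read_folio() and example_copy_link() are placeholders, not ocfs2 functions.

    /* Sketch of the folio-based read completion used in the symlink change above. */
    static int example_read_folio(struct file *file, struct folio *folio)
    {
            struct inode *inode = folio->mapping->host;
            const char *src;
            size_t len;
            int err;

            err = example_copy_link(inode, &src, &len);   /* hypothetical helper */
            if (!err)
                    memcpy_to_folio(folio, 0, src, len);
            /* Sets the uptodate flag only when err == 0, then unlocks the folio. */
            folio_end_read(folio, err == 0);
            return err;
    }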
*/ static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb, diff --git a/fs/open.c b/fs/open.c index e6911101fe71..1be20de9f283 100644 --- a/fs/open.c +++ b/fs/open.c @@ -81,14 +81,18 @@ long vfs_truncate(const struct path *path, loff_t length) if (!S_ISREG(inode->i_mode)) return -EINVAL; - error = mnt_want_write(path->mnt); - if (error) - goto out; - idmap = mnt_idmap(path->mnt); error = inode_permission(idmap, inode, MAY_WRITE); if (error) - goto mnt_drop_write_and_out; + return error; + + error = fsnotify_truncate_perm(path, length); + if (error) + return error; + + error = mnt_want_write(path->mnt); + if (error) + return error; error = -EPERM; if (IS_APPEND(inode)) @@ -114,7 +118,7 @@ put_write_and_out: put_write_access(inode); mnt_drop_write_and_out: mnt_drop_write(path->mnt); -out: + return error; } EXPORT_SYMBOL_GPL(vfs_truncate); @@ -175,11 +179,18 @@ long do_ftruncate(struct file *file, loff_t length, int small) /* Check IS_APPEND on real upper inode */ if (IS_APPEND(file_inode(file))) return -EPERM; - sb_start_write(inode->i_sb); + error = security_file_truncate(file); - if (!error) - error = do_truncate(file_mnt_idmap(file), dentry, length, - ATTR_MTIME | ATTR_CTIME, file); + if (error) + return error; + + error = fsnotify_truncate_perm(&file->f_path, length); + if (error) + return error; + + sb_start_write(inode->i_sb); + error = do_truncate(file_mnt_idmap(file), dentry, length, + ATTR_MTIME | ATTR_CTIME, file); sb_end_write(inode->i_sb); return error; @@ -402,7 +413,6 @@ static bool access_need_override_creds(int flags) static const struct cred *access_override_creds(void) { - const struct cred *old_cred; struct cred *override_cred; override_cred = prepare_creds(); @@ -447,13 +457,7 @@ static const struct cred *access_override_creds(void) * freeing. */ override_cred->non_rcu = 1; - - old_cred = override_creds(override_cred); - - /* override_cred() gets its own ref */ - put_cred(override_cred); - - return old_cred; + return override_creds(override_cred); } static long do_faccessat(int dfd, const char __user *filename, int mode, int flags) @@ -523,7 +527,7 @@ out_path_release: } out: if (old_cred) - revert_creds(old_cred); + put_cred(revert_creds(old_cred)); return res; } @@ -902,6 +906,7 @@ static int do_dentry_open(struct file *f, if (unlikely(f->f_flags & O_PATH)) { f->f_mode = FMODE_PATH | FMODE_OPENED; + file_set_fsnotify_mode(f, FMODE_NONOTIFY); f->f_op = &empty_fops; return 0; } @@ -929,6 +934,12 @@ static int do_dentry_open(struct file *f, if (error) goto cleanup_all; + /* + * Set FMODE_NONOTIFY_* bits according to existing permission watches. + * If FMODE_NONOTIFY mode was already set for an fanotify fd or for a + * pseudo file, this call will not change the mode. 
+ */ + file_set_fsnotify_mode_from_watchers(f); error = fsnotify_open_perm(f); if (error) goto cleanup_all; @@ -1105,6 +1116,23 @@ struct file *dentry_open(const struct path *path, int flags, } EXPORT_SYMBOL(dentry_open); +struct file *dentry_open_nonotify(const struct path *path, int flags, + const struct cred *cred) +{ + struct file *f = alloc_empty_file(flags, cred); + if (!IS_ERR(f)) { + int error; + + file_set_fsnotify_mode(f, FMODE_NONOTIFY); + error = vfs_open(path, f); + if (error) { + fput(f); + f = ERR_PTR(error); + } + } + return f; +} + /** * dentry_create - Create and open a file * @path: path to create @@ -1202,7 +1230,7 @@ inline struct open_how build_open_how(int flags, umode_t mode) inline int build_open_flags(const struct open_how *how, struct open_flags *op) { u64 flags = how->flags; - u64 strip = __FMODE_NONOTIFY | O_CLOEXEC; + u64 strip = O_CLOEXEC; int lookup_flags = 0; int acc_mode = ACC_MODE(flags); @@ -1210,9 +1238,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) "struct open_flags doesn't yet handle flags > 32 bits"); /* - * Strip flags that either shouldn't be set by userspace like - * FMODE_NONOTIFY or that aren't relevant in determining struct - * open_flags like O_CLOEXEC. + * Strip flags that aren't relevant in determining struct open_flags. */ flags &= ~strip; @@ -1504,7 +1530,7 @@ static int filp_flush(struct file *filp, fl_owner_t id) { int retval = 0; - if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, + if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, filp, "VFS: Close: file count is 0 (f_op=%ps)", filp->f_op)) { return 0; diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c index 395a00ed8ac7..a19d1ad705db 100644 --- a/fs/orangefs/dcache.c +++ b/fs/orangefs/dcache.c @@ -13,10 +13,9 @@ #include "orangefs-kernel.h" /* Returns 1 if dentry can still be trusted, else 0. */ -static int orangefs_revalidate_lookup(struct dentry *dentry) +static int orangefs_revalidate_lookup(struct inode *parent_inode, const struct qstr *name, + struct dentry *dentry) { - struct dentry *parent_dentry = dget_parent(dentry); - struct inode *parent_inode = parent_dentry->d_inode; struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode); struct inode *inode = dentry->d_inode; struct orangefs_kernel_op_s *new_op; @@ -26,14 +25,14 @@ static int orangefs_revalidate_lookup(struct dentry *dentry) gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__); new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP); - if (!new_op) { - ret = -ENOMEM; - goto out_put_parent; - } + if (!new_op) + return -ENOMEM; new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW; new_op->upcall.req.lookup.parent_refn = parent->refn; - strscpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name); + /* op_alloc() leaves ->upcall zeroed */ + memcpy(new_op->upcall.req.lookup.d_name, name->name, + min(name->len, ORANGEFS_NAME_MAX - 1)); gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d interrupt flag [%d]\n", @@ -78,8 +77,6 @@ static int orangefs_revalidate_lookup(struct dentry *dentry) ret = 1; out_release_op: op_release(new_op); -out_put_parent: - dput(parent_dentry); return ret; out_drop: gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n", @@ -92,7 +89,8 @@ out_drop: * * Should return 1 if dentry can still be trusted, else 0. 
*/ -static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int orangefs_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { int ret; unsigned long time = (unsigned long) dentry->d_fsdata; @@ -114,7 +112,7 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags) * If this passes, the positive dentry still exists or the negative * dentry still does not exist. */ - if (!orangefs_revalidate_lookup(dentry)) + if (!orangefs_revalidate_lookup(dir, name, dentry)) return 0; /* We do not need to continue with negative dentries. */ diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 1b508f543384..f52073022fae 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -206,8 +206,8 @@ static void orangefs_kernel_debug_init(void) pr_info("%s: overflow 1!\n", __func__); } - debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, - &kernel_debug_fops); + debugfs_create_file_aux_num(ORANGEFS_KMOD_DEBUG_FILE, 0444, debug_dir, k_buffer, + 0, &kernel_debug_fops); } @@ -306,11 +306,10 @@ static void orangefs_client_debug_init(void) pr_info("%s: overflow! 2\n", __func__); } - client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE, - 0444, - debug_dir, - c_buffer, - &kernel_debug_fops); + client_debug_dentry = debugfs_create_file_aux_num( + ORANGEFS_CLIENT_DEBUG_FILE, + 0444, debug_dir, c_buffer, 1, + &kernel_debug_fops); } /* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/ @@ -393,9 +392,9 @@ static ssize_t orangefs_debug_write(struct file *file, * Thwart users who try to jamb a ridiculous number * of bytes into the debug file... */ - if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) { + if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) { silly = count; - count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1; + count = ORANGEFS_MAX_DEBUG_STRING_LEN; } buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); @@ -418,8 +417,7 @@ static ssize_t orangefs_debug_write(struct file *file, * A service operation is required to set a new client-side * debug mask. */ - if (!strcmp(file->f_path.dentry->d_name.name, - ORANGEFS_KMOD_DEBUG_FILE)) { + if (!debugfs_get_aux_num(file)) { // kernel-debug debug_string_to_mask(buf, &orangefs_gossip_debug_mask, 0); debug_mask_to_string(&orangefs_gossip_debug_mask, 0); debug_string = kernel_debug_string; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 08e683917d12..c9993ff66fc2 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -575,12 +575,12 @@ static const struct cred *ovl_setup_cred_for_create(struct dentry *dentry, } /* - * Caller is going to match this with revert_creds_light() and drop + * Caller is going to match this with revert_creds() and drop * referenec on the returned creds. * We must be called with creator creds already, otherwise we risk * leaking creds. 
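The orangefs conversion above follows the interface change applied throughout this series: d_revalidate now receives the parent directory inode and the name being revalidated, so implementations no longer need the dget_parent()/dput() dance the old code used. A hedged sketch of the new shape for a hypothetical filesystem; foo_check_name_on_server() is made up for illustration.

    /* Sketch: ->d_revalidate with the new (dir, name) arguments. */
    static int foo_d_revalidate(struct inode *dir, const struct qstr *name,
                                struct dentry *dentry, unsigned int flags)
    {
            if (flags & LOOKUP_RCU)
                    return -ECHILD;   /* defer to ref-walk if we must sleep */

            /* The caller supplies the parent inode and name, so there is no
             * need to pin the parent with dget_parent() as before. */
            return foo_check_name_on_server(dir, name, d_inode(dentry));
    }

    static const struct dentry_operations foo_dentry_operations = {
            .d_revalidate = foo_d_revalidate,
    };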
*/ - old_cred = override_creds_light(override_cred); + old_cred = override_creds(override_cred); WARN_ON_ONCE(old_cred != ovl_creds(dentry->d_sb)); return override_cred; diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index cea820cb3b55..be5c65d6f848 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c @@ -14,8 +14,6 @@ #include <linux/exportfs.h> #include "overlayfs.h" -#include "../internal.h" /* for vfs_path_lookup */ - struct ovl_lookup_data { struct super_block *sb; const struct ovl_layer *layer; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index fe511192f83c..86ae6f6da36b 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -91,7 +91,24 @@ static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak) if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) ret = d->d_op->d_weak_revalidate(d, flags); } else if (d->d_flags & DCACHE_OP_REVALIDATE) { - ret = d->d_op->d_revalidate(d, flags); + struct dentry *parent; + struct inode *dir; + struct name_snapshot n; + + if (flags & LOOKUP_RCU) { + parent = READ_ONCE(d->d_parent); + dir = d_inode_rcu(parent); + if (!dir) + return -ECHILD; + } else { + parent = dget_parent(d); + dir = d_inode(parent); + } + take_dentry_name_snapshot(&n, d); + ret = d->d_op->d_revalidate(dir, &n.name, d, flags); + release_dentry_name_snapshot(&n); + if (!(flags & LOOKUP_RCU)) + dput(parent); if (!ret) { if (!(flags & LOOKUP_RCU)) d_invalidate(d); @@ -127,7 +144,8 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry, return ret; } -static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) +static int ovl_dentry_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { return ovl_dentry_revalidate_common(dentry, flags, false); } diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 9aa7493b1e10..0819c739cc2f 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -65,12 +65,12 @@ const struct cred *ovl_override_creds(struct super_block *sb) { struct ovl_fs *ofs = OVL_FS(sb); - return override_creds_light(ofs->creator_cred); + return override_creds(ofs->creator_cred); } void ovl_revert_creds(const struct cred *old_cred) { - revert_creds_light(old_cred); + revert_creds(old_cred); } /* diff --git a/fs/pidfs.c b/fs/pidfs.c index 618abb1fa1b8..63f9699ebac3 100644 --- a/fs/pidfs.c +++ b/fs/pidfs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include <linux/anon_inodes.h> +#include <linux/exportfs.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/cgroup.h> @@ -23,6 +24,97 @@ #include "internal.h" #include "mount.h" +static struct rb_root pidfs_ino_tree = RB_ROOT; + +#if BITS_PER_LONG == 32 +static inline unsigned long pidfs_ino(u64 ino) +{ + return lower_32_bits(ino); +} + +/* On 32 bit the generation number are the upper 32 bits. */ +static inline u32 pidfs_gen(u64 ino) +{ + return upper_32_bits(ino); +} + +#else + +/* On 64 bit simply return ino. */ +static inline unsigned long pidfs_ino(u64 ino) +{ + return ino; +} + +/* On 64 bit the generation number is 0. 
*/ +static inline u32 pidfs_gen(u64 ino) +{ + return 0; +} +#endif + +static int pidfs_ino_cmp(struct rb_node *a, const struct rb_node *b) +{ + struct pid *pid_a = rb_entry(a, struct pid, pidfs_node); + struct pid *pid_b = rb_entry(b, struct pid, pidfs_node); + u64 pid_ino_a = pid_a->ino; + u64 pid_ino_b = pid_b->ino; + + if (pid_ino_a < pid_ino_b) + return -1; + if (pid_ino_a > pid_ino_b) + return 1; + return 0; +} + +void pidfs_add_pid(struct pid *pid) +{ + static u64 pidfs_ino_nr = 2; + + /* + * On 64 bit nothing special happens. The 64bit number assigned + * to struct pid is the inode number. + * + * On 32 bit the 64 bit number assigned to struct pid is split + * into two 32 bit numbers. The lower 32 bits are used as the + * inode number and the upper 32 bits are used as the inode + * generation number. + * + * On 32 bit pidfs_ino() will return the lower 32 bit. When + * pidfs_ino() returns zero a wrap around happened. When a + * wraparound happens the 64 bit number will be incremented by 2 + * so inode numbering starts at 2 again. + * + * On 64 bit comparing two pidfds is as simple as comparing + * inode numbers. + * + * When a wraparound happens on 32 bit multiple pidfds with the + * same inode number are likely to exist (This isn't a problem + * since before pidfs pidfds used the anonymous inode meaning + * all pidfds had the same inode number.). Userspace can + * reconstruct the 64 bit identifier by retrieving both the + * inode number and the inode generation number to compare or + * use file handles. + */ + if (pidfs_ino(pidfs_ino_nr) == 0) + pidfs_ino_nr += 2; + + pid->ino = pidfs_ino_nr; + pid->stashed = NULL; + pidfs_ino_nr++; + + write_seqcount_begin(&pidmap_lock_seq); + rb_find_add_rcu(&pid->pidfs_node, &pidfs_ino_tree, pidfs_ino_cmp); + write_seqcount_end(&pidmap_lock_seq); +} + +void pidfs_remove_pid(struct pid *pid) +{ + write_seqcount_begin(&pidmap_lock_seq); + rb_erase(&pid->pidfs_node, &pidfs_ino_tree); + write_seqcount_end(&pidmap_lock_seq); +} + #ifdef CONFIG_PROC_FS /** * pidfd_show_fdinfo - print information about a pidfd @@ -190,6 +282,37 @@ static long pidfd_info(struct task_struct *task, unsigned int cmd, unsigned long return 0; } +static bool pidfs_ioctl_valid(unsigned int cmd) +{ + switch (cmd) { + case FS_IOC_GETVERSION: + case PIDFD_GET_CGROUP_NAMESPACE: + case PIDFD_GET_IPC_NAMESPACE: + case PIDFD_GET_MNT_NAMESPACE: + case PIDFD_GET_NET_NAMESPACE: + case PIDFD_GET_PID_FOR_CHILDREN_NAMESPACE: + case PIDFD_GET_TIME_NAMESPACE: + case PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE: + case PIDFD_GET_UTS_NAMESPACE: + case PIDFD_GET_USER_NAMESPACE: + case PIDFD_GET_PID_NAMESPACE: + return true; + } + + /* Extensible ioctls require some more careful checks. */ + switch (_IOC_NR(cmd)) { + case _IOC_NR(PIDFD_GET_INFO): + /* + * Try to prevent performing a pidfd ioctl when someone + * erronously mistook the file descriptor for a pidfd. + * This is not perfect but will catch most cases. 
+ */ + return (_IOC_TYPE(cmd) == _IOC_TYPE(PIDFD_GET_INFO)); + } + + return false; +} + static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct task_struct *task __free(put_task) = NULL; @@ -198,6 +321,17 @@ static long pidfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct ns_common *ns_common = NULL; struct pid_namespace *pid_ns; + if (!pidfs_ioctl_valid(cmd)) + return -ENOIOCTLCMD; + + if (cmd == FS_IOC_GETVERSION) { + if (!arg) + return -EINVAL; + + __u32 __user *argp = (__u32 __user *)arg; + return put_user(file_inode(file)->i_generation, argp); + } + task = get_pid_task(pid, PIDTYPE_PID); if (!task) return -ESRCH; @@ -318,40 +452,6 @@ struct pid *pidfd_pid(const struct file *file) static struct vfsmount *pidfs_mnt __ro_after_init; -#if BITS_PER_LONG == 32 -/* - * Provide a fallback mechanism for 32-bit systems so processes remain - * reliably comparable by inode number even on those systems. - */ -static DEFINE_IDA(pidfd_inum_ida); - -static int pidfs_inum(struct pid *pid, unsigned long *ino) -{ - int ret; - - ret = ida_alloc_range(&pidfd_inum_ida, RESERVED_PIDS + 1, - UINT_MAX, GFP_ATOMIC); - if (ret < 0) - return -ENOSPC; - - *ino = ret; - return 0; -} - -static inline void pidfs_free_inum(unsigned long ino) -{ - if (ino > 0) - ida_free(&pidfd_inum_ida, ino); -} -#else -static inline int pidfs_inum(struct pid *pid, unsigned long *ino) -{ - *ino = pid->ino; - return 0; -} -#define pidfs_free_inum(ino) ((void)(ino)) -#endif - /* * The vfs falls back to simple_setattr() if i_op->setattr() isn't * implemented. Let's reject it completely until we have a clean @@ -403,7 +503,6 @@ static void pidfs_evict_inode(struct inode *inode) clear_inode(inode); put_pid(pid); - pidfs_free_inum(inode->i_ino); } static const struct super_operations pidfs_sops = { @@ -421,25 +520,149 @@ static char *pidfs_dname(struct dentry *dentry, char *buffer, int buflen) return dynamic_dname(buffer, buflen, "anon_inode:[pidfd]"); } -static const struct dentry_operations pidfs_dentry_operations = { +const struct dentry_operations pidfs_dentry_operations = { .d_delete = always_delete_dentry, .d_dname = pidfs_dname, .d_prune = stashed_dentry_prune, }; +static int pidfs_encode_fh(struct inode *inode, u32 *fh, int *max_len, + struct inode *parent) +{ + const struct pid *pid = inode->i_private; + + if (*max_len < 2) { + *max_len = 2; + return FILEID_INVALID; + } + + *max_len = 2; + *(u64 *)fh = pid->ino; + return FILEID_KERNFS; +} + +static int pidfs_ino_find(const void *key, const struct rb_node *node) +{ + const u64 pid_ino = *(u64 *)key; + const struct pid *pid = rb_entry(node, struct pid, pidfs_node); + + if (pid_ino < pid->ino) + return -1; + if (pid_ino > pid->ino) + return 1; + return 0; +} + +/* Find a struct pid based on the inode number. */ +static struct pid *pidfs_ino_get_pid(u64 ino) +{ + struct pid *pid; + struct rb_node *node; + unsigned int seq; + + guard(rcu)(); + do { + seq = read_seqcount_begin(&pidmap_lock_seq); + node = rb_find_rcu(&ino, &pidfs_ino_tree, pidfs_ino_find); + if (node) + break; + } while (read_seqcount_retry(&pidmap_lock_seq, seq)); + + if (!node) + return NULL; + + pid = rb_entry(node, struct pid, pidfs_node); + + /* Within our pid namespace hierarchy? 
*/ + if (pid_vnr(pid) == 0) + return NULL; + + return get_pid(pid); +} + +static struct dentry *pidfs_fh_to_dentry(struct super_block *sb, + struct fid *fid, int fh_len, + int fh_type) +{ + int ret; + u64 pid_ino; + struct path path; + struct pid *pid; + + if (fh_len < 2) + return NULL; + + switch (fh_type) { + case FILEID_KERNFS: + pid_ino = *(u64 *)fid; + break; + default: + return NULL; + } + + pid = pidfs_ino_get_pid(pid_ino); + if (!pid) + return NULL; + + ret = path_from_stashed(&pid->stashed, pidfs_mnt, pid, &path); + if (ret < 0) + return ERR_PTR(ret); + + mntput(path.mnt); + return path.dentry; +} + +/* + * Make sure that we reject any nonsensical flags that users pass via + * open_by_handle_at(). Note that PIDFD_THREAD is defined as O_EXCL, and + * PIDFD_NONBLOCK as O_NONBLOCK. + */ +#define VALID_FILE_HANDLE_OPEN_FLAGS \ + (O_RDONLY | O_WRONLY | O_RDWR | O_NONBLOCK | O_CLOEXEC | O_EXCL) + +static int pidfs_export_permission(struct handle_to_path_ctx *ctx, + unsigned int oflags) +{ + if (oflags & ~(VALID_FILE_HANDLE_OPEN_FLAGS | O_LARGEFILE)) + return -EINVAL; + + /* + * pidfd_ino_get_pid() will verify that the struct pid is part + * of the caller's pid namespace hierarchy. No further + * permission checks are needed. + */ + return 0; +} + +static struct file *pidfs_export_open(struct path *path, unsigned int oflags) +{ + /* + * Clear O_LARGEFILE as open_by_handle_at() forces it and raise + * O_RDWR as pidfds always are. + */ + oflags &= ~O_LARGEFILE; + return dentry_open(path, oflags | O_RDWR, current_cred()); +} + +static const struct export_operations pidfs_export_operations = { + .encode_fh = pidfs_encode_fh, + .fh_to_dentry = pidfs_fh_to_dentry, + .open = pidfs_export_open, + .permission = pidfs_export_permission, +}; + static int pidfs_init_inode(struct inode *inode, void *data) { + const struct pid *pid = data; + inode->i_private = data; inode->i_flags |= S_PRIVATE; inode->i_mode |= S_IRWXU; inode->i_op = &pidfs_inode_operations; inode->i_fop = &pidfs_file_operations; - /* - * Inode numbering for pidfs start at RESERVED_PIDS + 1. This - * avoids collisions with the root inode which is 1 for pseudo - * filesystems. - */ - return pidfs_inum(data, &inode->i_ino); + inode->i_ino = pidfs_ino(pid->ino); + inode->i_generation = pidfs_gen(pid->ino); + return 0; } static void pidfs_put_data(void *data) @@ -462,6 +685,7 @@ static int pidfs_init_fs_context(struct fs_context *fc) return -ENOMEM; ctx->ops = &pidfs_sops; + ctx->eops = &pidfs_export_operations; ctx->dops = &pidfs_dentry_operations; fc->s_fs_info = (void *)&pidfs_stashed_ops; return 0; diff --git a/fs/pipe.c b/fs/pipe.c index 12b22c2723b7..ce1af7592780 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -253,7 +253,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) size_t total_len = iov_iter_count(to); struct file *filp = iocb->ki_filp; struct pipe_inode_info *pipe = filp->private_data; - bool was_full, wake_next_reader = false; + bool wake_writer = false, wake_next_reader = false; ssize_t ret; /* Null read succeeds. */ @@ -264,14 +264,13 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) mutex_lock(&pipe->mutex); /* - * We only wake up writers if the pipe was full when we started - * reading in order to avoid unnecessary wakeups. + * We only wake up writers if the pipe was full when we started reading + * and it is no longer full after reading to avoid unnecessary wakeups. * * But when we do wake up writers, we do so using a sync wakeup * (WF_SYNC), because we want them to get going and generate more * data for us. 
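As the pidfs comments above spell out, a 64-bit kernel exposes the full pid identifier as the pidfd inode number, while a 32-bit kernel splits it into the inode number (low bits) and the inode generation (high bits), retrievable with FS_IOC_GETVERSION. A small user-space sketch of recombining the two, assuming a kernel with this series applied; error handling is omitted and the generation is read into a 32-bit variable because the pidfs handler writes exactly 32 bits.

    /* Sketch: derive a 64-bit pidfd identity from st_ino + i_generation. */
    #include <linux/fs.h>        /* FS_IOC_GETVERSION */
    #include <sys/ioctl.h>
    #include <sys/stat.h>
    #include <sys/syscall.h>
    #include <stdint.h>
    #include <unistd.h>

    static uint64_t pidfd_identity(int pidfd)
    {
            struct stat st;
            unsigned int gen = 0;   /* stays 0 on 64-bit kernels */

            fstat(pidfd, &st);
            ioctl(pidfd, FS_IOC_GETVERSION, &gen);
            return ((uint64_t)gen << 32) | (uint64_t)st.st_ino;
    }

    /* A pidfd is typically obtained with: syscall(SYS_pidfd_open, pid, 0). */

Two pidfds that yield the same identity refer to the same struct pid.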
*/ - was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); for (;;) { /* Read ->head with a barrier vs post_one_notification() */ unsigned int head = smp_load_acquire(&pipe->head); @@ -340,8 +339,10 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) buf->len = 0; } - if (!buf->len) + if (!buf->len) { + wake_writer |= pipe_full(head, tail, pipe->max_usage); tail = pipe_update_tail(pipe, buf, tail); + } total_len -= chars; if (!total_len) break; /* common path: read succeeded */ @@ -377,7 +378,7 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) * _very_ unlikely case that the pipe was full, but we got * no data. */ - if (unlikely(was_full)) + if (unlikely(wake_writer)) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT); @@ -390,15 +391,15 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to) if (wait_event_interruptible_exclusive(pipe->rd_wait, pipe_readable(pipe)) < 0) return -ERESTARTSYS; - mutex_lock(&pipe->mutex); - was_full = pipe_full(pipe->head, pipe->tail, pipe->max_usage); + wake_writer = false; wake_next_reader = true; + mutex_lock(&pipe->mutex); } if (pipe_empty(pipe->head, pipe->tail)) wake_next_reader = false; mutex_unlock(&pipe->mutex); - if (was_full) + if (wake_writer) wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM); if (wake_next_reader) wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM); @@ -959,6 +960,12 @@ int create_pipe_files(struct file **res, int flags) res[1] = f; stream_open(inode, res[0]); stream_open(inode, res[1]); + /* + * Disable permission and pre-content events, but enable legacy + * inotify events for legacy users. + */ + file_set_fsnotify_mode(res[0], FMODE_NONOTIFY_PERM); + file_set_fsnotify_mode(res[1], FMODE_NONOTIFY_PERM); return 0; } @@ -1477,7 +1484,7 @@ static int proc_dopipe_max_size(const struct ctl_table *table, int write, do_proc_dopipe_max_size_conv, NULL); } -static struct ctl_table fs_pipe_sysctls[] = { +static const struct ctl_table fs_pipe_sysctls[] = { { .procname = "pipe-max-size", .data = &pipe_max_size, diff --git a/fs/pnode.c b/fs/pnode.c index a799e0315cc9..ef048f008bdd 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -611,10 +611,10 @@ int propagate_umount(struct list_head *list) continue; } else if (child->mnt.mnt_flags & MNT_UMOUNT) { /* - * We have come accross an partially unmounted - * mount in list that has not been visited yet. - * Remember it has been visited and continue - * about our merry way. + * We have come across a partially unmounted + * mount in a list that has not been visited + * yet. Remember it has been visited and + * continue about our merry way. */ list_add_tail(&child->mnt_umounting, &visited); continue; diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig index d80a1431ef7b..6ae966c561e7 100644 --- a/fs/proc/Kconfig +++ b/fs/proc/Kconfig @@ -61,6 +61,25 @@ config PROC_VMCORE_DEVICE_DUMP as ELF notes to /proc/vmcore. You can still disable device dump using the kernel command line option 'novmcoredd'. +config NEED_PROC_VMCORE_DEVICE_RAM + bool + +config PROC_VMCORE_DEVICE_RAM + def_bool y + depends on PROC_VMCORE && NEED_PROC_VMCORE_DEVICE_RAM + depends on VIRTIO_MEM + help + If the elfcore hdr is allocated and prepared by the dump kernel + ("2nd kernel") instead of the crashed kernel, RAM provided by memory + devices such as virtio-mem will not be included in the dump + image, because only the device driver can properly detect them. 
+ + With this config enabled, these RAM ranges will be queried from the + device drivers once the device gets probed, so they can be included + in the crash dump. + + Relevant architectures should select NEED_PROC_VMCORE_DEVICE_RAM. + config PROC_SYSCTL bool "Sysctl support (/proc/sys)" if EXPERT depends on PROC_FS diff --git a/fs/proc/array.c b/fs/proc/array.c index 55ed3510d2bb..d6a0369caa93 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -500,7 +500,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, * a program is not able to use ptrace(2) in that case. It is * safe because the task has stopped executing permanently. */ - if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) { + if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) { if (try_get_task_stack(task)) { eip = KSTK_EIP(task); esp = KSTK_ESP(task); diff --git a/fs/proc/base.c b/fs/proc/base.c index 0edf14a9840e..cd89e956c322 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -2058,7 +2058,8 @@ void pid_update_inode(struct task_struct *task, struct inode *inode) * performed a setuid(), etc. * */ -static int pid_revalidate(struct dentry *dentry, unsigned int flags) +static int pid_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct inode *inode; struct task_struct *task; @@ -2191,7 +2192,8 @@ static int dname_to_vma_addr(struct dentry *dentry, return 0; } -static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags) +static int map_files_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { unsigned long vm_start, vm_end; bool exact_vma_exists = false; @@ -3269,6 +3271,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm; + int ret = 0; mm = get_task_mm(task); if (mm) { @@ -3276,6 +3279,16 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm)); seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages); seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm)); + seq_printf(m, "ksm_merge_any: %s\n", + test_bit(MMF_VM_MERGE_ANY, &mm->flags) ? "yes" : "no"); + ret = mmap_read_lock_killable(mm); + if (ret) { + mmput(mm); + return ret; + } + seq_printf(m, "ksm_mergeable: %s\n", + ksm_process_mergeable(mm) ? 
"yes" : "no"); + mmap_read_unlock(mm); mmput(mm); } diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 24baf23e864f..37aa778d1af7 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -140,7 +140,8 @@ static void tid_fd_update_inode(struct task_struct *task, struct inode *inode, security_task_to_inode(task, inode); } -static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags) +static int tid_fd_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct task_struct *task; struct inode *inode; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index dbe82cf23ee4..8ec90826a49e 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -216,7 +216,8 @@ void proc_free_inum(unsigned int inum) ida_free(&proc_inum_ida, inum - PROC_DYNAMIC_FIRST); } -static int proc_misc_d_revalidate(struct dentry *dentry, unsigned int flags) +static int proc_misc_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; @@ -343,7 +344,8 @@ static const struct file_operations proc_dir_operations = { .iterate_shared = proc_readdir, }; -static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) +static int proc_net_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { return 0; } diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index e376f48c4b8b..1cb33771bf9f 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -65,7 +65,11 @@ static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt) #endif static LIST_HEAD(kclist_head); -static DECLARE_RWSEM(kclist_lock); +static int kcore_nphdr; +static size_t kcore_phdrs_len; +static size_t kcore_notes_len; +static size_t kcore_data_offset; +DEFINE_STATIC_PERCPU_RWSEM(kclist_lock); static int kcore_need_update = 1; /* @@ -101,33 +105,32 @@ void __init kclist_add(struct kcore_list *new, void *addr, size_t size, list_add_tail(&new->list, &kclist_head); } -static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len, - size_t *data_offset) +static void update_kcore_size(void) { size_t try, size; struct kcore_list *m; - *nphdr = 1; /* PT_NOTE */ + kcore_nphdr = 1; /* PT_NOTE */ size = 0; list_for_each_entry(m, &kclist_head, list) { try = kc_vaddr_to_offset((size_t)m->addr + m->size); if (try > size) size = try; - *nphdr = *nphdr + 1; + kcore_nphdr++; } - *phdrs_len = *nphdr * sizeof(struct elf_phdr); - *notes_len = (4 * sizeof(struct elf_note) + - 3 * ALIGN(sizeof(CORE_STR), 4) + - VMCOREINFO_NOTE_NAME_BYTES + - ALIGN(sizeof(struct elf_prstatus), 4) + - ALIGN(sizeof(struct elf_prpsinfo), 4) + - ALIGN(arch_task_struct_size, 4) + - ALIGN(vmcoreinfo_size, 4)); - *data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len + - *notes_len); - return *data_offset + size; + kcore_phdrs_len = kcore_nphdr * sizeof(struct elf_phdr); + kcore_notes_len = (4 * sizeof(struct elf_note) + + 3 * ALIGN(sizeof(CORE_STR), 4) + + VMCOREINFO_NOTE_NAME_BYTES + + ALIGN(sizeof(struct elf_prstatus), 4) + + ALIGN(sizeof(struct elf_prpsinfo), 4) + + ALIGN(arch_task_struct_size, 4) + + ALIGN(vmcoreinfo_size, 4)); + kcore_data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + kcore_phdrs_len + + kcore_notes_len); + proc_root_kcore->size = kcore_data_offset + size; } #ifdef CONFIG_HIGHMEM @@ -270,12 +273,10 @@ static int kcore_update_ram(void) { LIST_HEAD(list); LIST_HEAD(garbage); - int nphdr; - size_t phdrs_len, notes_len, data_offset; struct kcore_list *tmp, *pos; int ret = 0; - 
down_write(&kclist_lock); + percpu_down_write(&kclist_lock); if (!xchg(&kcore_need_update, 0)) goto out; @@ -293,11 +294,10 @@ static int kcore_update_ram(void) } list_splice_tail(&list, &kclist_head); - proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, ¬es_len, - &data_offset); + update_kcore_size(); out: - up_write(&kclist_lock); + percpu_up_write(&kclist_lock); list_for_each_entry_safe(pos, tmp, &garbage, list) { list_del(&pos->list); kfree(pos); @@ -326,27 +326,24 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) struct file *file = iocb->ki_filp; char *buf = file->private_data; loff_t *fpos = &iocb->ki_pos; - size_t phdrs_offset, notes_offset, data_offset; + size_t phdrs_offset, notes_offset; size_t page_offline_frozen = 1; - size_t phdrs_len, notes_len; struct kcore_list *m; size_t tsz; - int nphdr; unsigned long start; size_t buflen = iov_iter_count(iter); size_t orig_buflen = buflen; int ret = 0; - down_read(&kclist_lock); + percpu_down_read(&kclist_lock); /* * Don't race against drivers that set PageOffline() and expect no * further page access. */ page_offline_freeze(); - get_kcore_size(&nphdr, &phdrs_len, ¬es_len, &data_offset); phdrs_offset = sizeof(struct elfhdr); - notes_offset = phdrs_offset + phdrs_len; + notes_offset = phdrs_offset + kcore_phdrs_len; /* ELF file header. */ if (buflen && *fpos < sizeof(struct elfhdr)) { @@ -368,7 +365,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) .e_flags = ELF_CORE_EFLAGS, .e_ehsize = sizeof(struct elfhdr), .e_phentsize = sizeof(struct elf_phdr), - .e_phnum = nphdr, + .e_phnum = kcore_nphdr, }; tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos); @@ -382,10 +379,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) } /* ELF program headers. */ - if (buflen && *fpos < phdrs_offset + phdrs_len) { + if (buflen && *fpos < phdrs_offset + kcore_phdrs_len) { struct elf_phdr *phdrs, *phdr; - phdrs = kzalloc(phdrs_len, GFP_KERNEL); + phdrs = kzalloc(kcore_phdrs_len, GFP_KERNEL); if (!phdrs) { ret = -ENOMEM; goto out; @@ -393,13 +390,14 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) phdrs[0].p_type = PT_NOTE; phdrs[0].p_offset = notes_offset; - phdrs[0].p_filesz = notes_len; + phdrs[0].p_filesz = kcore_notes_len; phdr = &phdrs[1]; list_for_each_entry(m, &kclist_head, list) { phdr->p_type = PT_LOAD; phdr->p_flags = PF_R | PF_W | PF_X; - phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset; + phdr->p_offset = kc_vaddr_to_offset(m->addr) + + kcore_data_offset; phdr->p_vaddr = (size_t)m->addr; if (m->type == KCORE_RAM) phdr->p_paddr = __pa(m->addr); @@ -412,7 +410,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) phdr++; } - tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos); + tsz = min_t(size_t, buflen, + phdrs_offset + kcore_phdrs_len - *fpos); if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz, iter) != tsz) { kfree(phdrs); @@ -426,7 +425,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) } /* ELF note segment. 
*/ - if (buflen && *fpos < notes_offset + notes_len) { + if (buflen && *fpos < notes_offset + kcore_notes_len) { struct elf_prstatus prstatus = {}; struct elf_prpsinfo prpsinfo = { .pr_sname = 'R', @@ -438,7 +437,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) strscpy(prpsinfo.pr_psargs, saved_command_line, sizeof(prpsinfo.pr_psargs)); - notes = kzalloc(notes_len, GFP_KERNEL); + notes = kzalloc(kcore_notes_len, GFP_KERNEL); if (!notes) { ret = -ENOMEM; goto out; @@ -459,9 +458,10 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) */ append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, - min(vmcoreinfo_size, notes_len - i)); + min(vmcoreinfo_size, kcore_notes_len - i)); - tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos); + tsz = min_t(size_t, buflen, + notes_offset + kcore_notes_len - *fpos); if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) { kfree(notes); ret = -EFAULT; @@ -477,7 +477,7 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) * Check to see if our file offset matches with any of * the addresses in the elf_phdr on our list. */ - start = kc_offset_to_vaddr(*fpos - data_offset); + start = kc_offset_to_vaddr(*fpos - kcore_data_offset); if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) tsz = buflen; @@ -626,7 +626,7 @@ skip: out: page_offline_thaw(); - up_read(&kclist_lock); + percpu_up_read(&kclist_lock); if (ret) return ret; return orig_buflen - buflen; @@ -663,6 +663,7 @@ static int release_kcore(struct inode *inode, struct file *file) } static const struct proc_ops kcore_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, .proc_read_iter = read_kcore_iter, .proc_open = open_kcore, .proc_release = release_kcore, diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 8e159fc78c0a..c610224faf10 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -83,7 +83,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) { res = ns_get_name(name, sizeof(name), task, ns_ops); if (res >= 0) - res = readlink_copy(buffer, buflen, name); + res = readlink_copy(buffer, buflen, name, strlen(name)); } put_task_struct(task); return res; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 27a283d85a6e..cc9d74a06ff0 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -884,7 +884,8 @@ static const struct inode_operations proc_sys_dir_operations = { .getattr = proc_sys_getattr, }; -static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags) +static int proc_sys_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 3d8a82cee63e..a00120a3c099 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -8,6 +8,8 @@ * */ +#define pr_fmt(fmt) "vmcore: " fmt + #include <linux/mm.h> #include <linux/kcore.h> #include <linux/user.h> @@ -51,9 +53,14 @@ static u64 vmcore_size; static struct proc_dir_entry *proc_vmcore; #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP +struct vmcoredd_node { + struct list_head list; /* List of dumps */ + void *buf; /* Buffer containing device's dump */ + unsigned int size; /* Size of the buffer */ +}; + /* Device Dump list and mutex to synchronize access to list */ static LIST_HEAD(vmcoredd_list); -static DEFINE_MUTEX(vmcoredd_mutex); static bool vmcoredd_disabled; 
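The /proc/kcore changes above replace the plain rw_semaphore with a per-CPU rw_semaphore and precompute the header sizes once in update_kcore_size(), so the frequent read path only pays for the cheap reader side while the rare update path takes the expensive writer side. A minimal sketch of that locking pattern with placeholder names; it is not the kcore code itself.

    /* Sketch: percpu rwsem, cheap frequent readers, rare expensive writers. */
    #include <linux/percpu-rwsem.h>

    static DEFINE_STATIC_PERCPU_RWSEM(example_lock);
    static size_t example_cached_size;   /* only recomputed under the write lock */

    static void example_update(size_t new_size)
    {
            percpu_down_write(&example_lock);   /* waits out all readers */
            example_cached_size = new_size;
            percpu_up_write(&example_lock);
    }

    static size_t example_read(void)
    {
            size_t size;

            percpu_down_read(&example_lock);    /* roughly a per-CPU counter bump */
            size = example_cached_size;
            percpu_up_read(&example_lock);
            return size;
    }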
core_param(novmcoredd, vmcoredd_disabled, bool, 0); @@ -62,17 +69,22 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0); /* Device Dump Size */ static size_t vmcoredd_orig_sz; -static DEFINE_SPINLOCK(vmcore_cb_lock); +static DEFINE_MUTEX(vmcore_mutex); + DEFINE_STATIC_SRCU(vmcore_cb_srcu); /* List of registered vmcore callbacks. */ static LIST_HEAD(vmcore_cb_list); /* Whether the vmcore has been opened once. */ static bool vmcore_opened; +/* Whether the vmcore is currently open. */ +static unsigned int vmcore_open; + +static void vmcore_process_device_ram(struct vmcore_cb *cb); void register_vmcore_cb(struct vmcore_cb *cb) { INIT_LIST_HEAD(&cb->next); - spin_lock(&vmcore_cb_lock); + mutex_lock(&vmcore_mutex); list_add_tail(&cb->next, &vmcore_cb_list); /* * Registering a vmcore callback after the vmcore was opened is @@ -80,13 +92,15 @@ void register_vmcore_cb(struct vmcore_cb *cb) */ if (vmcore_opened) pr_warn_once("Unexpected vmcore callback registration\n"); - spin_unlock(&vmcore_cb_lock); + if (!vmcore_open && cb->get_device_ram) + vmcore_process_device_ram(cb); + mutex_unlock(&vmcore_mutex); } EXPORT_SYMBOL_GPL(register_vmcore_cb); void unregister_vmcore_cb(struct vmcore_cb *cb) { - spin_lock(&vmcore_cb_lock); + mutex_lock(&vmcore_mutex); list_del_rcu(&cb->next); /* * Unregistering a vmcore callback after the vmcore was opened is @@ -95,7 +109,7 @@ void unregister_vmcore_cb(struct vmcore_cb *cb) */ if (vmcore_opened) pr_warn_once("Unexpected vmcore callback unregistration\n"); - spin_unlock(&vmcore_cb_lock); + mutex_unlock(&vmcore_mutex); synchronize_srcu(&vmcore_cb_srcu); } @@ -120,9 +134,23 @@ static bool pfn_is_ram(unsigned long pfn) static int open_vmcore(struct inode *inode, struct file *file) { - spin_lock(&vmcore_cb_lock); + mutex_lock(&vmcore_mutex); vmcore_opened = true; - spin_unlock(&vmcore_cb_lock); + if (vmcore_open + 1 == 0) { + mutex_unlock(&vmcore_mutex); + return -EBUSY; + } + vmcore_open++; + mutex_unlock(&vmcore_mutex); + + return 0; +} + +static int release_vmcore(struct inode *inode, struct file *file) +{ + mutex_lock(&vmcore_mutex); + vmcore_open--; + mutex_unlock(&vmcore_mutex); return 0; } @@ -243,33 +271,27 @@ static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size) { struct vmcoredd_node *dump; u64 offset = 0; - int ret = 0; size_t tsz; char *buf; - mutex_lock(&vmcoredd_mutex); list_for_each_entry(dump, &vmcoredd_list, list) { if (start < offset + dump->size) { tsz = min(offset + (u64)dump->size - start, (u64)size); buf = dump->buf + start - offset; - if (copy_to_iter(buf, tsz, iter) < tsz) { - ret = -EFAULT; - goto out_unlock; - } + if (copy_to_iter(buf, tsz, iter) < tsz) + return -EFAULT; size -= tsz; start += tsz; /* Leave now if buffer filled already */ if (!size) - goto out_unlock; + return 0; } offset += dump->size; } -out_unlock: - mutex_unlock(&vmcoredd_mutex); - return ret; + return 0; } #ifdef CONFIG_MMU @@ -278,20 +300,16 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst, { struct vmcoredd_node *dump; u64 offset = 0; - int ret = 0; size_t tsz; char *buf; - mutex_lock(&vmcoredd_mutex); list_for_each_entry(dump, &vmcoredd_list, list) { if (start < offset + dump->size) { tsz = min(offset + (u64)dump->size - start, (u64)size); buf = dump->buf + start - offset; if (remap_vmalloc_range_partial(vma, dst, buf, 0, - tsz)) { - ret = -EFAULT; - goto out_unlock; - } + tsz)) + return -EFAULT; size -= tsz; start += tsz; @@ -299,14 +317,12 @@ static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned 
long dst, /* Leave now if buffer filled already */ if (!size) - goto out_unlock; + return 0; } offset += dump->size; } -out_unlock: - mutex_unlock(&vmcoredd_mutex); - return ret; + return 0; } #endif /* CONFIG_MMU */ #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ @@ -316,10 +332,10 @@ out_unlock: */ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos) { + struct vmcore_range *m = NULL; ssize_t acc = 0, tmp; size_t tsz; u64 start; - struct vmcore *m = NULL; if (!iov_iter_count(iter) || *fpos >= vmcore_size) return 0; @@ -404,6 +420,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos) if (!iov_iter_count(iter)) return acc; } + + cond_resched(); } return acc; @@ -574,7 +592,7 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) { size_t size = vma->vm_end - vma->vm_start; u64 start, end, len, tsz; - struct vmcore *m; + struct vmcore_range *m; start = (u64)vma->vm_pgoff << PAGE_SHIFT; end = start + size; @@ -691,21 +709,17 @@ static int mmap_vmcore(struct file *file, struct vm_area_struct *vma) static const struct proc_ops vmcore_proc_ops = { .proc_open = open_vmcore, + .proc_release = release_vmcore, .proc_read_iter = read_vmcore, .proc_lseek = default_llseek, .proc_mmap = mmap_vmcore, }; -static struct vmcore* __init get_new_element(void) -{ - return kzalloc(sizeof(struct vmcore), GFP_KERNEL); -} - static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz, struct list_head *vc_list) { + struct vmcore_range *m; u64 size; - struct vmcore *m; size = elfsz + elfnotesegsz; list_for_each_entry(m, vc_list, list) { @@ -1107,7 +1121,6 @@ static int __init process_ptload_program_headers_elf64(char *elfptr, Elf64_Ehdr *ehdr_ptr; Elf64_Phdr *phdr_ptr; loff_t vmcore_off; - struct vmcore *new; ehdr_ptr = (Elf64_Ehdr *)elfptr; phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */ @@ -1126,13 +1139,8 @@ static int __init process_ptload_program_headers_elf64(char *elfptr, end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); size = end - start; - /* Add this contiguous chunk of memory to vmcore list.*/ - new = get_new_element(); - if (!new) + if (vmcore_alloc_add_range(vc_list, start, size)) return -ENOMEM; - new->paddr = start; - new->size = size; - list_add_tail(&new->list, vc_list); /* Update the program header offset. */ phdr_ptr->p_offset = vmcore_off + (paddr - start); @@ -1150,7 +1158,6 @@ static int __init process_ptload_program_headers_elf32(char *elfptr, Elf32_Ehdr *ehdr_ptr; Elf32_Phdr *phdr_ptr; loff_t vmcore_off; - struct vmcore *new; ehdr_ptr = (Elf32_Ehdr *)elfptr; phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */ @@ -1169,13 +1176,8 @@ static int __init process_ptload_program_headers_elf32(char *elfptr, end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE); size = end - start; - /* Add this contiguous chunk of memory to vmcore list.*/ - new = get_new_element(); - if (!new) + if (vmcore_alloc_add_range(vc_list, start, size)) return -ENOMEM; - new->paddr = start; - new->size = size; - list_add_tail(&new->list, vc_list); /* Update the program header offset */ phdr_ptr->p_offset = vmcore_off + (paddr - start); @@ -1188,8 +1190,8 @@ static int __init process_ptload_program_headers_elf32(char *elfptr, static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz, struct list_head *vc_list) { + struct vmcore_range *m; loff_t vmcore_off; - struct vmcore *m; /* Skip ELF header, program headers and ELF note segment. 
*/ vmcore_off = elfsz + elfnotes_sz; @@ -1516,12 +1518,18 @@ int vmcore_add_device_dump(struct vmcoredd_data *data) dump->buf = buf; dump->size = data_size; - /* Add the dump to driver sysfs list */ - mutex_lock(&vmcoredd_mutex); - list_add_tail(&dump->list, &vmcoredd_list); - mutex_unlock(&vmcoredd_mutex); + /* Add the dump to driver sysfs list and update the elfcore hdr */ + mutex_lock(&vmcore_mutex); + if (vmcore_opened) + pr_warn_once("Unexpected adding of device dump\n"); + if (vmcore_open) { + ret = -EBUSY; + goto out_err; + } + list_add_tail(&dump->list, &vmcoredd_list); vmcoredd_update_size(data_size); + mutex_unlock(&vmcore_mutex); return 0; out_err: @@ -1533,11 +1541,163 @@ out_err: EXPORT_SYMBOL(vmcore_add_device_dump); #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ +#ifdef CONFIG_PROC_VMCORE_DEVICE_RAM +static int vmcore_realloc_elfcore_buffer_elf64(size_t new_size) +{ + char *elfcorebuf_new; + + if (WARN_ON_ONCE(new_size < elfcorebuf_sz)) + return -EINVAL; + if (get_order(elfcorebuf_sz_orig) == get_order(new_size)) { + elfcorebuf_sz_orig = new_size; + return 0; + } + + elfcorebuf_new = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(new_size)); + if (!elfcorebuf_new) + return -ENOMEM; + memcpy(elfcorebuf_new, elfcorebuf, elfcorebuf_sz); + free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig)); + elfcorebuf = elfcorebuf_new; + elfcorebuf_sz_orig = new_size; + return 0; +} + +static void vmcore_reset_offsets_elf64(void) +{ + Elf64_Phdr *phdr_start = (Elf64_Phdr *)(elfcorebuf + sizeof(Elf64_Ehdr)); + loff_t vmcore_off = elfcorebuf_sz + elfnotes_sz; + Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfcorebuf; + Elf64_Phdr *phdr; + int i; + + for (i = 0, phdr = phdr_start; i < ehdr->e_phnum; i++, phdr++) { + u64 start, end; + + /* + * After merge_note_headers_elf64() we should only have a single + * PT_NOTE entry that starts immediately after elfcorebuf_sz. + */ + if (phdr->p_type == PT_NOTE) { + phdr->p_offset = elfcorebuf_sz; + continue; + } + + start = rounddown(phdr->p_offset, PAGE_SIZE); + end = roundup(phdr->p_offset + phdr->p_memsz, PAGE_SIZE); + phdr->p_offset = vmcore_off + (phdr->p_offset - start); + vmcore_off = vmcore_off + end - start; + } + set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list); +} + +static int vmcore_add_device_ram_elf64(struct list_head *list, size_t count) +{ + Elf64_Phdr *phdr_start = (Elf64_Phdr *)(elfcorebuf + sizeof(Elf64_Ehdr)); + Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfcorebuf; + struct vmcore_range *cur; + Elf64_Phdr *phdr; + size_t new_size; + int rc; + + if ((Elf32_Half)(ehdr->e_phnum + count) != ehdr->e_phnum + count) { + pr_err("too many device ram ranges\n"); + return -ENOSPC; + } + + /* elfcorebuf_sz must always cover full pages. */ + new_size = sizeof(Elf64_Ehdr) + + (ehdr->e_phnum + count) * sizeof(Elf64_Phdr); + new_size = roundup(new_size, PAGE_SIZE); + + /* + * Make sure we have sufficient space to include the new PT_LOAD + * entries. + */ + rc = vmcore_realloc_elfcore_buffer_elf64(new_size); + if (rc) { + pr_err("resizing elfcore failed\n"); + return rc; + } + + /* Modify our used elfcore buffer size to cover the new entries. */ + elfcorebuf_sz = new_size; + + /* Fill the added PT_LOAD entries. */ + phdr = phdr_start + ehdr->e_phnum; + list_for_each_entry(cur, list, list) { + WARN_ON_ONCE(!IS_ALIGNED(cur->paddr | cur->size, PAGE_SIZE)); + elfcorehdr_fill_device_ram_ptload_elf64(phdr, cur->paddr, cur->size); + + /* p_offset will be adjusted later. 
*/ + phdr++; + ehdr->e_phnum++; + } + list_splice_tail(list, &vmcore_list); + + /* We changed elfcorebuf_sz and added new entries; reset all offsets. */ + vmcore_reset_offsets_elf64(); + + /* Finally, recalculate the total vmcore size. */ + vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz, + &vmcore_list); + proc_vmcore->size = vmcore_size; + return 0; +} + +static void vmcore_process_device_ram(struct vmcore_cb *cb) +{ + unsigned char *e_ident = (unsigned char *)elfcorebuf; + struct vmcore_range *first, *m; + LIST_HEAD(list); + int count; + + /* We only support Elf64 dumps for now. */ + if (WARN_ON_ONCE(e_ident[EI_CLASS] != ELFCLASS64)) { + pr_err("device ram ranges only support Elf64\n"); + return; + } + + if (cb->get_device_ram(cb, &list)) { + pr_err("obtaining device ram ranges failed\n"); + return; + } + count = list_count_nodes(&list); + if (!count) + return; + + /* + * For some reason these ranges are already know? Might happen + * with unusual register->unregister->register sequences; we'll simply + * sanity check using the first range. + */ + first = list_first_entry(&list, struct vmcore_range, list); + list_for_each_entry(m, &vmcore_list, list) { + unsigned long long m_end = m->paddr + m->size; + unsigned long long first_end = first->paddr + first->size; + + if (first->paddr < m_end && m->paddr < first_end) + goto out_free; + } + + /* If adding the mem nodes succeeds, they must not be freed. */ + if (!vmcore_add_device_ram_elf64(&list, count)) + return; +out_free: + vmcore_free_ranges(&list); +} +#else /* !CONFIG_PROC_VMCORE_DEVICE_RAM */ +static void vmcore_process_device_ram(struct vmcore_cb *cb) +{ +} +#endif /* CONFIG_PROC_VMCORE_DEVICE_RAM */ + /* Free all dumps in vmcore device dump list */ static void vmcore_free_device_dumps(void) { #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP - mutex_lock(&vmcoredd_mutex); + mutex_lock(&vmcore_mutex); while (!list_empty(&vmcoredd_list)) { struct vmcoredd_node *dump; @@ -1547,7 +1707,7 @@ static void vmcore_free_device_dumps(void) vfree(dump->buf); vfree(dump); } - mutex_unlock(&vmcoredd_mutex); + mutex_unlock(&vmcore_mutex); #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ } @@ -1569,7 +1729,7 @@ static int __init vmcore_init(void) rc = parse_crash_elf_headers(); if (rc) { elfcorehdr_free(elfcorehdr_addr); - pr_warn("Kdump: vmcore not initialized\n"); + pr_warn("not initialized\n"); return rc; } elfcorehdr_free(elfcorehdr_addr); @@ -1590,14 +1750,7 @@ void vmcore_cleanup(void) proc_vmcore = NULL; } - /* clear the vmcore list. */ - while (!list_empty(&vmcore_list)) { - struct vmcore *m; - - m = list_first_entry(&vmcore_list, struct vmcore, list); - list_del(&m->list); - kfree(m); - } + vmcore_free_ranges(&vmcore_list); free_elfcorebuf(); /* clear vmcore device dump list */ diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c index 65b2473e22ff..fa6b8cb788a1 100644 --- a/fs/pstore/blk.c +++ b/fs/pstore/blk.c @@ -89,7 +89,7 @@ static struct pstore_device_info *pstore_device_info; _##name_ = check_size(name, alignsize); \ else \ _##name_ = 0; \ - /* Synchronize module parameters with resuls. */ \ + /* Synchronize module parameters with results. */ \ name = _##name_ / 1024; \ dev->zone.name = _##name_; \ } @@ -121,7 +121,7 @@ static int __register_pstore_device(struct pstore_device_info *dev) if (pstore_device_info) return -EBUSY; - /* zero means not limit on which backends to attempt to store. */ + /* zero means no limit on which backends attempt to store. 
*/ if (!dev->flags) dev->flags = UINT_MAX; diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 694db616663f..ceb5639a0629 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -1212,6 +1212,11 @@ static struct pstore_zone **psz_init_zones(enum pstore_type_id type, } c = total_size / record_size; + if (unlikely(!c)) { + pr_err("zone %s total_size too small\n", name); + return ERR_PTR(-EINVAL); + } + zones = kcalloc(c, sizeof(*zones), GFP_KERNEL); if (!zones) { pr_err("allocate for zones %s failed\n", name); diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index f9578918cfb2..825c5c2e0962 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2926,7 +2926,7 @@ static int do_proc_dqstats(const struct ctl_table *table, int write, return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); } -static struct ctl_table fs_dqstats_table[] = { +static const struct ctl_table fs_dqstats_table[] = { { .procname = "lookups", .data = &dqstats.stat[DQST_LOOKUPS], diff --git a/fs/select.c b/fs/select.c index e223d1fe9d55..7da531b1cf6b 100644 --- a/fs/select.c +++ b/fs/select.c @@ -786,7 +786,7 @@ static inline int get_sigset_argpack(struct sigset_argpack *to, } return 0; Efault: - user_access_end(); + user_read_access_end(); return -EFAULT; } @@ -1355,7 +1355,7 @@ static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to, } return 0; Efault: - user_access_end(); + user_read_access_end(); return -EFAULT; } diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c index b5724ef9f182..214a44509e7b 100644 --- a/fs/smb/client/asn1.c +++ b/fs/smb/client/asn1.c @@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen, server->sec_kerberos = true; else if (oid == OID_ntlmssp) server->sec_ntlmssp = true; + else if (oid == OID_IAKerb) + server->sec_iakerb = true; else { char buf[50]; diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c index 28f568b5fc27..bc1c1e9b288a 100644 --- a/fs/smb/client/cifs_spnego.c +++ b/fs/smb/client/cifs_spnego.c @@ -138,11 +138,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo, dp = description + strlen(description); - /* for now, only sec=krb5 and sec=mskrb5 are valid */ + /* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */ if (server->sec_kerberos) sprintf(dp, ";sec=krb5"); else if (server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); + else if (server->sec_iakerb) + sprintf(dp, ";sec=iakerb"); else { cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); sprintf(dp, ";sec=krb5"); diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c index ba79aa2107cc..699a3f76d083 100644 --- a/fs/smb/client/cifsacl.c +++ b/fs/smb/client/cifsacl.c @@ -1395,7 +1395,7 @@ chown_chgrp_exit: #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, - u32 __maybe_unused unused) + u32 info) { struct smb_ntsd *pntsd = NULL; unsigned int xid; @@ -1407,7 +1407,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, xid = get_xid(); rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd, - pacllen); + pacllen, info); free_xid(xid); cifs_put_tlink(tlink); @@ -1419,7 +1419,7 @@ struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, } static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, - const char *path, u32 *pacllen) + const char *path, u32 *pacllen, u32 info) { struct smb_ntsd *pntsd = NULL; int oplock = 0; @@ -1446,9 +1446,12 @@ static struct 
smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, .fid = &fid, }; + if (info & SACL_SECINFO) + oparms.desired_access |= SYSTEM_SECURITY; + rc = CIFS_open(xid, &oparms, &oplock, NULL); if (!rc) { - rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen); + rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen, info); CIFSSMBClose(xid, tcon, fid.netfid); } @@ -1472,7 +1475,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb, if (inode) open_file = find_readable_file(CIFS_I(inode), true); if (!open_file) - return get_cifs_acl_by_path(cifs_sb, path, pacllen); + return get_cifs_acl_by_path(cifs_sb, path, pacllen, info); pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen, info); cifsFileInfo_put(open_file); @@ -1485,7 +1488,7 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen, { int oplock = 0; unsigned int xid; - int rc, access_flags; + int rc, access_flags = 0; struct cifs_tcon *tcon; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); @@ -1498,10 +1501,12 @@ int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen, tcon = tlink_tcon(tlink); xid = get_xid(); - if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP) - access_flags = WRITE_OWNER; - else - access_flags = WRITE_DAC; + if (aclflag & CIFS_ACL_OWNER || aclflag & CIFS_ACL_GROUP) + access_flags |= WRITE_OWNER; + if (aclflag & CIFS_ACL_SACL) + access_flags |= SYSTEM_SECURITY; + if (aclflag & CIFS_ACL_DACL) + access_flags |= WRITE_DAC; oparms = (struct cifs_open_parms) { .tcon = tcon, diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c index 7a43daacc815..e69968e88fe7 100644 --- a/fs/smb/client/cifsencrypt.c +++ b/fs/smb/client/cifsencrypt.c @@ -315,59 +315,72 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) return 0; } -/* Server has provided av pairs/target info in the type 2 challenge - * packet and we have plucked it and stored within smb session. - * We parse that blob here to find netbios domain name to be used - * as part of ntlmv2 authentication (in Target String), if not already - * specified on the command line. - * If this function returns without any error but without fetching - * domain name, authentication may fail against some server but - * may not fail against other (those who are not very particular - * about target string i.e. for some, just user name might suffice. +#define AV_TYPE(av) (le16_to_cpu(av->type)) +#define AV_LEN(av) (le16_to_cpu(av->length)) +#define AV_DATA_PTR(av) ((void *)av->data) + +#define av_for_each_entry(ses, av) \ + for (av = NULL; (av = find_next_av(ses, av));) + +static struct ntlmssp2_name *find_next_av(struct cifs_ses *ses, + struct ntlmssp2_name *av) +{ + u16 len; + u8 *end; + + end = (u8 *)ses->auth_key.response + ses->auth_key.len; + if (!av) { + if (unlikely(!ses->auth_key.response || !ses->auth_key.len)) + return NULL; + av = (void *)ses->auth_key.response; + } else { + av = (void *)((u8 *)av + sizeof(*av) + AV_LEN(av)); + } + + if ((u8 *)av + sizeof(*av) > end) + return NULL; + + len = AV_LEN(av); + if (AV_TYPE(av) == NTLMSSP_AV_EOL) + return NULL; + if (!len || (u8 *)av + sizeof(*av) + len > end) + return NULL; + return av; +} + +/* + * Check if server has provided av pair of @type in the NTLMSSP + * CHALLENGE_MESSAGE blob. 
*/ -static int -find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) +static int find_av_name(struct cifs_ses *ses, u16 type, char **name, u16 maxlen) { - unsigned int attrsize; - unsigned int type; - unsigned int onesize = sizeof(struct ntlmssp2_name); - unsigned char *blobptr; - unsigned char *blobend; - struct ntlmssp2_name *attrptr; + const struct nls_table *nlsc = ses->local_nls; + struct ntlmssp2_name *av; + u16 len, nlen; - if (!ses->auth_key.len || !ses->auth_key.response) + if (*name) return 0; - blobptr = ses->auth_key.response; - blobend = blobptr + ses->auth_key.len; - - while (blobptr + onesize < blobend) { - attrptr = (struct ntlmssp2_name *) blobptr; - type = le16_to_cpu(attrptr->type); - if (type == NTLMSSP_AV_EOL) - break; - blobptr += 2; /* advance attr type */ - attrsize = le16_to_cpu(attrptr->length); - blobptr += 2; /* advance attr size */ - if (blobptr + attrsize > blobend) + av_for_each_entry(ses, av) { + len = AV_LEN(av); + if (AV_TYPE(av) != type) + continue; + if (!IS_ALIGNED(len, sizeof(__le16))) { + cifs_dbg(VFS | ONCE, "%s: bad length(%u) for type %u\n", + __func__, len, type); + continue; + } + nlen = len / sizeof(__le16); + if (nlen <= maxlen) { + ++nlen; + *name = kmalloc(nlen, GFP_KERNEL); + if (!*name) + return -ENOMEM; + cifs_from_utf16(*name, AV_DATA_PTR(av), nlen, + len, nlsc, NO_MAP_UNI_RSVD); break; - if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { - if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) - break; - if (!ses->domainName) { - ses->domainName = - kmalloc(attrsize + 1, GFP_KERNEL); - if (!ses->domainName) - return -ENOMEM; - cifs_from_utf16(ses->domainName, - (__le16 *)blobptr, attrsize, attrsize, - nls_cp, NO_MAP_UNI_RSVD); - break; - } } - blobptr += attrsize; /* advance attr value */ } - return 0; } @@ -377,40 +390,16 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) * as part of ntlmv2 authentication (or local current time as * default in case of failure) */ -static __le64 -find_timestamp(struct cifs_ses *ses) +static __le64 find_timestamp(struct cifs_ses *ses) { - unsigned int attrsize; - unsigned int type; - unsigned int onesize = sizeof(struct ntlmssp2_name); - unsigned char *blobptr; - unsigned char *blobend; - struct ntlmssp2_name *attrptr; + struct ntlmssp2_name *av; struct timespec64 ts; - if (!ses->auth_key.len || !ses->auth_key.response) - return 0; - - blobptr = ses->auth_key.response; - blobend = blobptr + ses->auth_key.len; - - while (blobptr + onesize < blobend) { - attrptr = (struct ntlmssp2_name *) blobptr; - type = le16_to_cpu(attrptr->type); - if (type == NTLMSSP_AV_EOL) - break; - blobptr += 2; /* advance attr type */ - attrsize = le16_to_cpu(attrptr->length); - blobptr += 2; /* advance attr size */ - if (blobptr + attrsize > blobend) - break; - if (type == NTLMSSP_AV_TIMESTAMP) { - if (attrsize == sizeof(u64)) - return *((__le64 *)blobptr); - } - blobptr += attrsize; /* advance attr value */ + av_for_each_entry(ses, av) { + if (AV_TYPE(av) == NTLMSSP_AV_TIMESTAMP && + AV_LEN(av) == sizeof(u64)) + return *((__le64 *)AV_DATA_PTR(av)); } - ktime_get_real_ts64(&ts); return cpu_to_le64(cifs_UnixTimeToNT(ts)); } @@ -563,16 +552,29 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) { if (!ses->domainName) { if (ses->domainAuto) { - rc = find_domain_name(ses, nls_cp); - if (rc) { - cifs_dbg(VFS, "error %d finding domain name\n", - rc); + /* + * Domain (workgroup) hasn't been specified in + * mount options, so try 
to find it in + * CHALLENGE_MESSAGE message and then use it as + * part of NTLMv2 authentication. + */ + rc = find_av_name(ses, NTLMSSP_AV_NB_DOMAIN_NAME, + &ses->domainName, + CIFS_MAX_DOMAINNAME_LEN); + if (rc) goto setup_ntlmv2_rsp_ret; - } } else { ses->domainName = kstrdup("", GFP_KERNEL); + if (!ses->domainName) { + rc = -ENOMEM; + goto setup_ntlmv2_rsp_ret; + } } } + rc = find_av_name(ses, NTLMSSP_AV_DNS_DOMAIN_NAME, + &ses->dns_dom, CIFS_MAX_DOMAINNAME_LEN); + if (rc) + goto setup_ntlmv2_rsp_ret; } else { rc = build_avpair_blob(ses, nls_cp); if (rc) { diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index b800c9f585d8..6a3bd652d251 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -715,6 +715,12 @@ cifs_show_options(struct seq_file *s, struct dentry *root) cifs_sb->ctx->backupgid)); seq_show_option(s, "reparse", cifs_reparse_type_str(cifs_sb->ctx->reparse_type)); + if (cifs_sb->ctx->nonativesocket) + seq_puts(s, ",nonativesocket"); + else + seq_puts(s, ",nativesocket"); + seq_show_option(s, "symlink", + cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb))); seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize); seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize); diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h index a762dbbbd959..831fee962c4d 100644 --- a/fs/smb/client/cifsfs.h +++ b/fs/smb/client/cifsfs.h @@ -146,6 +146,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 52 -#define CIFS_VERSION "2.52" +#define SMB3_PRODUCT_BUILD 53 +#define CIFS_VERSION "2.53" #endif /* _CIFSFS_H */ diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 6e63abe461fd..ac1f890a0d54 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -151,6 +151,7 @@ enum securityEnum { NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ Kerberos, /* Kerberos via SPNEGO */ + IAKerb, /* Kerberos proxy */ }; enum upcall_target_enum { @@ -160,6 +161,7 @@ enum upcall_target_enum { }; enum cifs_reparse_type { + CIFS_REPARSE_TYPE_NONE, CIFS_REPARSE_TYPE_NFS, CIFS_REPARSE_TYPE_WSL, CIFS_REPARSE_TYPE_DEFAULT = CIFS_REPARSE_TYPE_NFS, @@ -168,6 +170,8 @@ enum cifs_reparse_type { static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type) { switch (type) { + case CIFS_REPARSE_TYPE_NONE: + return "none"; case CIFS_REPARSE_TYPE_NFS: return "nfs"; case CIFS_REPARSE_TYPE_WSL: @@ -177,6 +181,39 @@ static inline const char *cifs_reparse_type_str(enum cifs_reparse_type type) } } +enum cifs_symlink_type { + CIFS_SYMLINK_TYPE_DEFAULT, + CIFS_SYMLINK_TYPE_NONE, + CIFS_SYMLINK_TYPE_NATIVE, + CIFS_SYMLINK_TYPE_UNIX, + CIFS_SYMLINK_TYPE_MFSYMLINKS, + CIFS_SYMLINK_TYPE_SFU, + CIFS_SYMLINK_TYPE_NFS, + CIFS_SYMLINK_TYPE_WSL, +}; + +static inline const char *cifs_symlink_type_str(enum cifs_symlink_type type) +{ + switch (type) { + case CIFS_SYMLINK_TYPE_NONE: + return "none"; + case CIFS_SYMLINK_TYPE_NATIVE: + return "native"; + case CIFS_SYMLINK_TYPE_UNIX: + return "unix"; + case CIFS_SYMLINK_TYPE_MFSYMLINKS: + return "mfsymlinks"; + case CIFS_SYMLINK_TYPE_SFU: + return "sfu"; + case CIFS_SYMLINK_TYPE_NFS: + return "nfs"; + case CIFS_SYMLINK_TYPE_WSL: + return "wsl"; + default: + return "unknown"; + } +} + struct session_key { unsigned int len; char *response; @@ -215,10 +252,7 @@ struct cifs_cred { struct cifs_open_info_data { bool adjust_tz; - union { - 
bool reparse_point; - bool symlink; - }; + bool reparse_point; struct { /* ioctl response buffer */ struct { @@ -226,10 +260,7 @@ struct cifs_open_info_data { struct kvec iov; } io; __u32 tag; - union { - struct reparse_data_buffer *buf; - struct reparse_posix_data *posix; - }; + struct reparse_data_buffer *buf; } reparse; struct { __u8 eas[SMB2_WSL_MAX_QUERY_EA_RESP_SIZE]; @@ -326,7 +357,7 @@ struct smb_version_operations { int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *); void (*downgrade_oplock)(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache); + __u16 epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -521,12 +552,12 @@ struct smb_version_operations { /* if we can do cache read operations */ bool (*is_read_op)(__u32); /* set oplock level for the inode */ - void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, - bool *); + void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch, + bool *purge_cache); /* create lease context buffer for CREATE request */ char * (*create_lease_buf)(u8 *lease_key, u8 oplock); /* parse lease context buffer and return oplock/epoch info */ - __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); + __u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey); ssize_t (*copychunk_range)(const unsigned int, struct cifsFileInfo *src_file, struct cifsFileInfo *target_file, @@ -751,6 +782,7 @@ struct TCP_Server_Info { bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ + bool sec_iakerb; /* supports pass-through auth for Kerberos (krb5 proxy) */ bool large_buf; /* is current buffer large? */ /* use SMBD connection instead of socket */ bool rdma; @@ -811,23 +843,15 @@ struct TCP_Server_Info { bool use_swn_dstaddr; struct sockaddr_storage swn_dstaddr; #endif - struct mutex refpath_lock; /* protects leaf_fullpath */ /* - * leaf_fullpath: Canonical DFS referral path related to this - * connection. - * It is used in DFS cache refresher, reconnect and may - * change due to nested DFS links. - * - * Protected by @refpath_lock and @srv_lock. The @refpath_lock is - * mostly used for not requiring a copy of @leaf_fullpath when getting - * cached or new DFS referrals (which might also sleep during I/O). - * While @srv_lock is held for making string and NULL comparisons against - * both fields as in mount(2) and cache refresh. + * Canonical DFS referral path used in cifs_reconnect() for failover as + * well as in DFS cache refresher. 
* * format: \\HOST\SHARE[\OPTIONAL PATH] */ char *leaf_fullpath; bool dfs_conn:1; + char dns_dom[CIFS_MAX_DOMAINNAME_LEN + 1]; }; static inline bool is_smb1(struct TCP_Server_Info *server) @@ -1154,6 +1178,7 @@ struct cifs_ses { /* ========= end: protected by chan_lock ======== */ struct cifs_ses *dfs_root_ses; struct nls_table *local_nls; + char *dns_dom; /* FQDN of the domain */ }; static inline bool @@ -1422,7 +1447,7 @@ struct cifs_fid { __u8 create_guid[16]; __u32 access; struct cifs_pending_open *pending_open; - unsigned int epoch; + __u16 epoch; #ifdef CONFIG_CIFS_DEBUG2 __u64 mid; #endif /* CIFS_DEBUG2 */ @@ -1455,7 +1480,7 @@ struct cifsFileInfo { bool oplock_break_cancelled:1; bool status_file_deleted:1; /* file has been deleted */ bool offload:1; /* offload final part of _put to a wq */ - unsigned int oplock_epoch; /* epoch from the lease break */ + __u16 oplock_epoch; /* epoch from the lease break */ __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ @@ -1552,7 +1577,7 @@ struct cifsInodeInfo { spinlock_t open_file_lock; /* protects openFileList */ __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ unsigned int oplock; /* oplock/lease level we have */ - unsigned int epoch; /* used to track lease state changes */ + __u16 epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ #define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ @@ -2125,6 +2150,8 @@ static inline char *get_security_type_str(enum securityEnum sectype) return "Kerberos"; case NTLMv2: return "NTLMv2"; + case IAKerb: + return "IAKerb"; default: return "Unknown"; } @@ -2180,11 +2207,13 @@ static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses) static inline void move_cifs_info_to_smb2(struct smb2_file_all_info *dst, const FILE_ALL_INFO *src) { - memcpy(dst, src, (size_t)((u8 *)&src->AccessFlags - (u8 *)src)); - dst->AccessFlags = src->AccessFlags; - dst->CurrentByteOffset = src->CurrentByteOffset; - dst->Mode = src->Mode; - dst->AlignmentRequirement = src->AlignmentRequirement; + memcpy(dst, src, (size_t)((u8 *)&src->EASize - (u8 *)src)); + dst->IndexNumber = 0; + dst->EASize = src->EASize; + dst->AccessFlags = 0; + dst->CurrentByteOffset = 0; + dst->Mode = 0; + dst->AlignmentRequirement = 0; dst->FileNameLength = src->FileNameLength; } @@ -2311,4 +2340,24 @@ static inline bool cifs_ses_exiting(struct cifs_ses *ses) return ret; } +static inline bool cifs_netbios_name(const char *name, size_t namelen) +{ + bool ret = false; + size_t i; + + if (namelen >= 1 && namelen <= RFC1001_NAME_LEN) { + for (i = 0; i < namelen; i++) { + const unsigned char c = name[i]; + + if (c == '\\' || c == '/' || c == ':' || c == '*' || + c == '?' 
|| c == '"' || c == '<' || c == '>' || + c == '|' || c == '.') + return false; + if (!ret && isalpha(c)) + ret = true; + } + } + return ret; +} + #endif /* _CIFS_GLOB_H */ diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h index ee78bb6741d6..48d0d6f439cf 100644 --- a/fs/smb/client/cifspdu.h +++ b/fs/smb/client/cifspdu.h @@ -190,42 +190,82 @@ */ #define FILE_READ_DATA 0x00000001 /* Data can be read from the file */ + /* or directory child entries can */ + /* be listed together with the */ + /* associated child attributes */ + /* (so the FILE_READ_ATTRIBUTES on */ + /* the child entry is not needed) */ #define FILE_WRITE_DATA 0x00000002 /* Data can be written to the file */ + /* or new file can be created in */ + /* the directory */ #define FILE_APPEND_DATA 0x00000004 /* Data can be appended to the file */ + /* (for non-local files over SMB it */ + /* is same as FILE_WRITE_DATA) */ + /* or new subdirectory can be */ + /* created in the directory */ #define FILE_READ_EA 0x00000008 /* Extended attributes associated */ /* with the file can be read */ #define FILE_WRITE_EA 0x00000010 /* Extended attributes associated */ /* with the file can be written */ #define FILE_EXECUTE 0x00000020 /*Data can be read into memory from */ /* the file using system paging I/O */ -#define FILE_DELETE_CHILD 0x00000040 + /* for executing the file / script */ + /* or right to traverse directory */ + /* (but by default all users have */ + /* directory bypass traverse */ + /* privilege and do not need this */ + /* permission on directories at all)*/ +#define FILE_DELETE_CHILD 0x00000040 /* Child entry can be deleted from */ + /* the directory (so the DELETE on */ + /* the child entry is not needed) */ #define FILE_READ_ATTRIBUTES 0x00000080 /* Attributes associated with the */ - /* file can be read */ + /* file or directory can be read */ #define FILE_WRITE_ATTRIBUTES 0x00000100 /* Attributes associated with the */ - /* file can be written */ -#define DELETE 0x00010000 /* The file can be deleted */ -#define READ_CONTROL 0x00020000 /* The access control list and */ - /* ownership associated with the */ - /* file can be read */ -#define WRITE_DAC 0x00040000 /* The access control list and */ - /* ownership associated with the */ - /* file can be written. */ + /* file or directory can be written */ +#define DELETE 0x00010000 /* The file or dir can be deleted */ +#define READ_CONTROL 0x00020000 /* The discretionary access control */ + /* list and ownership associated */ + /* with the file or dir can be read */ +#define WRITE_DAC 0x00040000 /* The discretionary access control */ + /* list associated with the file or */ + /* directory can be written */ #define WRITE_OWNER 0x00080000 /* Ownership information associated */ - /* with the file can be written */ + /* with the file/dir can be written */ #define SYNCHRONIZE 0x00100000 /* The file handle can waited on to */ /* synchronize with the completion */ /* of an input/output request */ #define SYSTEM_SECURITY 0x01000000 /* The system access control list */ - /* can be read and changed */ -#define GENERIC_ALL 0x10000000 -#define GENERIC_EXECUTE 0x20000000 -#define GENERIC_WRITE 0x40000000 -#define GENERIC_READ 0x80000000 - /* In summary - Relevant file */ - /* access flags from CIFS are */ - /* file_read_data, file_write_data */ - /* file_execute, file_read_attributes*/ - /* write_dac, and delete. 
*/ + /* associated with the file or */ + /* directory can be read or written */ + /* (cannot be in DACL, can in SACL) */ +#define MAXIMUM_ALLOWED 0x02000000 /* Maximal subset of GENERIC_ALL */ + /* permissions which can be granted */ + /* (cannot be in DACL nor SACL) */ +#define GENERIC_ALL 0x10000000 /* Same as: GENERIC_EXECUTE | */ + /* GENERIC_WRITE | */ + /* GENERIC_READ | */ + /* FILE_DELETE_CHILD | */ + /* DELETE | */ + /* WRITE_DAC | */ + /* WRITE_OWNER */ + /* So GENERIC_ALL contains all bits */ + /* mentioned above except these two */ + /* SYSTEM_SECURITY MAXIMUM_ALLOWED */ +#define GENERIC_EXECUTE 0x20000000 /* Same as: FILE_EXECUTE | */ + /* FILE_READ_ATTRIBUTES | */ + /* READ_CONTROL | */ + /* SYNCHRONIZE */ +#define GENERIC_WRITE 0x40000000 /* Same as: FILE_WRITE_DATA | */ + /* FILE_APPEND_DATA | */ + /* FILE_WRITE_EA | */ + /* FILE_WRITE_ATTRIBUTES | */ + /* READ_CONTROL | */ + /* SYNCHRONIZE */ +#define GENERIC_READ 0x80000000 /* Same as: FILE_READ_DATA | */ + /* FILE_READ_EA | */ + /* FILE_READ_ATTRIBUTES | */ + /* READ_CONTROL | */ + /* SYNCHRONIZE */ #define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES) #define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \ @@ -649,7 +689,7 @@ typedef union smb_com_session_setup_andx { struct ntlmssp2_name { __le16 type; __le16 length; -/* char name[length]; */ + __u8 data[]; } __attribute__((packed)); struct ntlmv2_resp { @@ -1484,36 +1524,6 @@ struct file_notify_information { __u8 FileName[]; } __attribute__((packed)); -/* For IO_REPARSE_TAG_SYMLINK */ -struct reparse_symlink_data { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __le16 SubstituteNameOffset; - __le16 SubstituteNameLength; - __le16 PrintNameOffset; - __le16 PrintNameLength; - __le32 Flags; - char PathBuffer[]; -} __attribute__((packed)); - -/* Flag above */ -#define SYMLINK_FLAG_RELATIVE 0x00000001 - -/* For IO_REPARSE_TAG_NFS */ -#define NFS_SPECFILE_LNK 0x00000000014B4E4C -#define NFS_SPECFILE_CHR 0x0000000000524843 -#define NFS_SPECFILE_BLK 0x00000000004B4C42 -#define NFS_SPECFILE_FIFO 0x000000004F464946 -#define NFS_SPECFILE_SOCK 0x000000004B434F53 -struct reparse_posix_data { - __le32 ReparseTag; - __le16 ReparseDataLength; - __u16 Reserved; - __le64 InodeType; /* LNK, FIFO, CHR etc. 
*/ - __u8 DataBuffer[]; -} __attribute__((packed)); - struct cifs_quota_data { __u32 rsrvd1; /* 0 */ __u32 sid_size; @@ -2280,13 +2290,7 @@ typedef struct { /* data block encoding of response to level 263 QPathInfo */ __u8 DeletePending; __u8 Directory; __u16 Pad2; - __le64 IndexNumber; __le32 EASize; - __le32 AccessFlags; - __u64 IndexNumber1; - __le64 CurrentByteOffset; - __le32 Mode; - __le32 AlignmentRequirement; __le32 FileNameLength; union { char __pad; diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index d26f9bbb5382..81680001944d 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -474,9 +474,6 @@ extern int cifs_query_reparse_point(const unsigned int xid, const char *full_path, u32 *tag, struct kvec *rsp, int *rsp_buftype); -extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon, - __u16 fid, char **symlinkinfo, - const struct nls_table *nls_codepage); extern int CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid); extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, @@ -560,7 +557,7 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nls_codepage, struct cifs_sb_info *cifs_sb); extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, - __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen); + __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen, __u32 info); extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16, struct smb_ntsd *pntsd, __u32 len, int aclflag); extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon, @@ -659,7 +656,7 @@ char *extract_sharename(const char *unc); int parse_reparse_point(struct reparse_data_buffer *buf, u32 plen, struct cifs_sb_info *cifs_sb, const char *full_path, - bool unicode, struct cifs_open_info_data *data); + struct cifs_open_info_data *data); int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, umode_t mode, dev_t dev, @@ -680,7 +677,7 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, } int match_target_ip(struct TCP_Server_Info *server, - const char *share, size_t share_len, + const char *host, size_t hostlen, bool *result); int cifs_inval_name_dfs_link_error(const unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 6cb1e81993f8..3feaa0f68169 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -152,8 +152,17 @@ again: spin_unlock(&ses->ses_lock); rc = cifs_negotiate_protocol(0, ses, server); - if (!rc) + if (!rc) { rc = cifs_setup_session(0, ses, server, ses->local_nls); + if ((rc == -EACCES) || (rc == -EHOSTDOWN) || (rc == -EKEYREVOKED)) { + /* + * Try alternate password for next reconnect if an alternate + * password is available. + */ + if (ses->password2) + swap(ses->password2, ses->password); + } + } /* do we need to reconnect tcon? 
*/ if (rc || !tcon->need_reconnect) { @@ -1258,14 +1267,6 @@ openRetry: return rc; } -static void cifs_readv_worker(struct work_struct *work) -{ - struct cifs_io_subrequest *rdata = - container_of(work, struct cifs_io_subrequest, subreq.work); - - netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false); -} - static void cifs_readv_callback(struct mid_q_entry *mid) { @@ -1330,11 +1331,13 @@ cifs_readv_callback(struct mid_q_entry *mid) } else if (rdata->got_bytes > 0) { __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); } + if (rdata->got_bytes) + __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); } rdata->credits.value = 0; + rdata->subreq.error = rdata->result; rdata->subreq.transferred += rdata->got_bytes; - INIT_WORK(&rdata->subreq.work, cifs_readv_worker); queue_work(cifsiod_wq, &rdata->subreq.work); release_mid(mid); add_credits(server, &credits, 0); @@ -3366,7 +3369,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata, /* Get Security Descriptor (by handle) from remote server for a file or dir */ int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, - struct smb_ntsd **acl_inf, __u32 *pbuflen) + struct smb_ntsd **acl_inf, __u32 *pbuflen, __u32 info) { int rc = 0; int buf_type = 0; @@ -3389,7 +3392,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid, pSMB->MaxSetupCount = 0; pSMB->Fid = fid; /* file handle always le */ pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP | - CIFS_ACL_DACL); + CIFS_ACL_DACL | info); pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */ inc_rfc1001_len(pSMB, 11); iov[0].iov_base = (char *)pSMB; diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index ddcc9e514a0e..f917de020dd5 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -72,10 +72,8 @@ static void cifs_prune_tlinks(struct work_struct *work); */ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) { - int rc; - int len; - char *unc; struct sockaddr_storage ss; + int rc; if (!server->hostname) return -EINVAL; @@ -84,32 +82,18 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) if (server->hostname[0] == '\0') return 0; - len = strlen(server->hostname) + 3; - - unc = kmalloc(len, GFP_KERNEL); - if (!unc) { - cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__); - return -ENOMEM; - } - scnprintf(unc, len, "\\\\%s", server->hostname); - spin_lock(&server->srv_lock); ss = server->dstaddr; spin_unlock(&server->srv_lock); - rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL); - kfree(unc); - - if (rc < 0) { - cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n", - __func__, server->hostname, rc); - } else { + rc = dns_resolve_name(server->dns_dom, server->hostname, + strlen(server->hostname), + (struct sockaddr *)&ss); + if (!rc) { spin_lock(&server->srv_lock); memcpy(&server->dstaddr, &ss, sizeof(server->dstaddr)); spin_unlock(&server->srv_lock); - rc = 0; } - return rc; } @@ -438,7 +422,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server, } #ifdef CONFIG_CIFS_DFS_UPCALL -static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target) +static int __reconnect_target_locked(struct TCP_Server_Info *server, + const char *target) { int rc; char *hostname; @@ -471,34 +456,43 @@ static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const cha return rc; } -static int reconnect_target_unlocked(struct TCP_Server_Info *server, 
struct dfs_cache_tgt_list *tl, - struct dfs_cache_tgt_iterator **target_hint) +static int reconnect_target_locked(struct TCP_Server_Info *server, + struct dfs_cache_tgt_list *tl, + struct dfs_cache_tgt_iterator **target_hint) { - int rc; struct dfs_cache_tgt_iterator *tit; + int rc; *target_hint = NULL; /* If dfs target list is empty, then reconnect to last server */ tit = dfs_cache_get_tgt_iterator(tl); if (!tit) - return __reconnect_target_unlocked(server, server->hostname); + return __reconnect_target_locked(server, server->hostname); /* Otherwise, try every dfs target in @tl */ - for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) { - rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit)); + do { + const char *target = dfs_cache_get_tgt_name(tit); + + spin_lock(&server->srv_lock); + if (server->tcpStatus != CifsNeedReconnect) { + spin_unlock(&server->srv_lock); + return -ECONNRESET; + } + spin_unlock(&server->srv_lock); + rc = __reconnect_target_locked(server, target); if (!rc) { *target_hint = tit; break; } - } + } while ((tit = dfs_cache_get_next_tgt(tl, tit))); return rc; } static int reconnect_dfs_server(struct TCP_Server_Info *server) { struct dfs_cache_tgt_iterator *target_hint = NULL; - + const char *ref_path = server->leaf_fullpath + 1; DFS_CACHE_TGT_LIST(tl); int num_targets = 0; int rc = 0; @@ -511,10 +505,8 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after * refreshing the referral, so, in this case, default it to 1. */ - mutex_lock(&server->refpath_lock); - if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl)) + if (!dfs_cache_noreq_find(ref_path, NULL, &tl)) num_targets = dfs_cache_get_nr_tgts(&tl); - mutex_unlock(&server->refpath_lock); if (!num_targets) num_targets = 1; @@ -534,7 +526,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) try_to_freeze(); cifs_server_lock(server); - rc = reconnect_target_unlocked(server, &tl, &target_hint); + rc = reconnect_target_locked(server, &tl, &target_hint); if (rc) { /* Failed to reconnect socket */ cifs_server_unlock(server); @@ -558,9 +550,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) mod_delayed_work(cifsiod_wq, &server->reconnect, 0); } while (server->tcpStatus == CifsNeedReconnect); - mutex_lock(&server->refpath_lock); - dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint); - mutex_unlock(&server->refpath_lock); + dfs_cache_noreq_update_tgthint(ref_path, target_hint); dfs_cache_free_tgts(&tl); /* Need to set up echo worker again once connection has been established */ @@ -575,13 +565,8 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { - mutex_lock(&server->refpath_lock); - if (!server->leaf_fullpath) { - mutex_unlock(&server->refpath_lock); + if (!server->leaf_fullpath) return __cifs_reconnect(server, mark_smb_session); - } - mutex_unlock(&server->refpath_lock); - return reconnect_dfs_server(server); } #else @@ -1044,6 +1029,7 @@ clean_demultiplex_info(struct TCP_Server_Info *server) /* Release netns reference for this server. 
*/ put_net(cifs_net_ns(server)); kfree(server->leaf_fullpath); + kfree(server->hostname); kfree(server); length = atomic_dec_return(&tcpSesAllocCount); @@ -1540,42 +1526,10 @@ static int match_server(struct TCP_Server_Info *server, if (!cifs_match_ipaddr((struct sockaddr *)&ctx->srcaddr, (struct sockaddr *)&server->srcaddr)) return 0; - /* - * When matching cifs.ko superblocks (@match_super == true), we can't - * really match either @server->leaf_fullpath or @server->dstaddr - * directly since this @server might belong to a completely different - * server -- in case of domain-based DFS referrals or DFS links -- as - * provided earlier by mount(2) through 'source' and 'ip' options. - * - * Otherwise, match the DFS referral in @server->leaf_fullpath or the - * destination address in @server->dstaddr. - * - * When using 'nodfs' mount option, we avoid sharing it with DFS - * connections as they might failover. - */ - if (!match_super) { - if (!ctx->nodfs) { - if (server->leaf_fullpath) { - if (!ctx->leaf_fullpath || - strcasecmp(server->leaf_fullpath, - ctx->leaf_fullpath)) - return 0; - } else if (ctx->leaf_fullpath) { - return 0; - } - } else if (server->leaf_fullpath) { - return 0; - } - } - /* - * Match for a regular connection (address/hostname/port) which has no - * DFS referrals set. - */ - if (!server->leaf_fullpath && - (strcasecmp(server->hostname, ctx->server_hostname) || - !match_server_address(server, addr) || - !match_port(server, addr))) + if (strcasecmp(server->hostname, ctx->server_hostname) || + !match_server_address(server, addr) || + !match_port(server, addr)) return 0; if (!match_security(server, ctx)) @@ -1670,8 +1624,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) kfree_sensitive(server->session_key.response); server->session_key.response = NULL; server->session_key.len = 0; - kfree(server->hostname); - server->hostname = NULL; task = xchg(&server->tsk, NULL); if (task) @@ -1711,6 +1663,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, goto out_err; } } + if (ctx->dns_dom) + strscpy(tcp_ses->dns_dom, ctx->dns_dom); if (ctx->nosharesock) tcp_ses->nosharesock = true; @@ -1759,9 +1713,6 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); mutex_init(&tcp_ses->reconnect_mutex); -#ifdef CONFIG_CIFS_DFS_UPCALL - mutex_init(&tcp_ses->refpath_lock); -#endif memcpy(&tcp_ses->srcaddr, &ctx->srcaddr, sizeof(tcp_ses->srcaddr)); memcpy(&tcp_ses->dstaddr, &ctx->dstaddr, @@ -2277,12 +2228,13 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)), struct cifs_ses * cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { - int rc = 0; - int retries = 0; - unsigned int xid; - struct cifs_ses *ses; - struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; + struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; + struct cifs_ses *ses; + unsigned int xid; + int retries = 0; + size_t len; + int rc = 0; xid = get_xid(); @@ -2372,6 +2324,14 @@ retry_old_session: ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL); if (!ses->domainName) goto get_ses_fail; + + len = strnlen(ctx->domainname, CIFS_MAX_DOMAINNAME_LEN); + if (!cifs_netbios_name(ctx->domainname, len)) { + ses->dns_dom = kstrndup(ctx->domainname, + len, GFP_KERNEL); + if (!ses->dns_dom) + goto get_ses_fail; + } } strscpy(ses->workstation_name, 
ctx->workstation_name, sizeof(ses->workstation_name)); @@ -2889,6 +2849,10 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) return 0; if (old->ctx->reparse_type != new->ctx->reparse_type) return 0; + if (old->ctx->nonativesocket != new->ctx->nonativesocket) + return 0; + if (old->ctx->symlink_type != new->ctx->symlink_type) + return 0; return 1; } diff --git a/fs/smb/client/dfs.c b/fs/smb/client/dfs.c index 4647df9e1e3b..f65a8a90ba27 100644 --- a/fs/smb/client/dfs.c +++ b/fs/smb/client/dfs.c @@ -9,6 +9,8 @@ #include "fs_context.h" #include "dfs.h" +#define DFS_DOM(ctx) (ctx->dfs_root_ses ? ctx->dfs_root_ses->dns_dom : NULL) + /** * dfs_parse_target_referral - set fs context for dfs target referral * @@ -46,8 +48,8 @@ int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_para if (rc) goto out; - rc = dns_resolve_server_name_to_ip(path, (struct sockaddr *)&ctx->dstaddr, NULL); - + rc = dns_resolve_unc(DFS_DOM(ctx), path, + (struct sockaddr *)&ctx->dstaddr); out: kfree(path); return rc; @@ -59,8 +61,9 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path) int rc; ctx->leaf_fullpath = (char *)full_path; + ctx->dns_dom = DFS_DOM(ctx); rc = cifs_mount_get_session(mnt_ctx); - ctx->leaf_fullpath = NULL; + ctx->leaf_fullpath = ctx->dns_dom = NULL; return rc; } @@ -95,15 +98,16 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx, return rc; } -static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx, - struct dfs_info3_param *tgt, - struct dfs_ref_walk *rw) +static int setup_dfs_ref(struct dfs_info3_param *tgt, struct dfs_ref_walk *rw) { - struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; - struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; + struct cifs_sb_info *cifs_sb = rw->mnt_ctx->cifs_sb; + struct smb3_fs_context *ctx = rw->mnt_ctx->fs_ctx; char *ref_path, *full_path; int rc; + set_root_smb_session(rw->mnt_ctx); + ref_walk_ses(rw) = ctx->dfs_root_ses; + full_path = smb3_fs_context_fullpath(ctx, CIFS_DIR_SEP(cifs_sb)); if (IS_ERR(full_path)) return PTR_ERR(full_path); @@ -120,35 +124,22 @@ static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx, } ref_walk_path(rw) = ref_path; ref_walk_fpath(rw) = full_path; - ref_walk_ses(rw) = ctx->dfs_root_ses; - return 0; + + return dfs_get_referral(rw->mnt_ctx, + ref_walk_path(rw) + 1, + ref_walk_tl(rw)); } -static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, - struct dfs_ref_walk *rw) +static int __dfs_referral_walk(struct dfs_ref_walk *rw) { - struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; + struct smb3_fs_context *ctx = rw->mnt_ctx->fs_ctx; + struct cifs_mount_ctx *mnt_ctx = rw->mnt_ctx; struct dfs_info3_param tgt = {}; int rc = -ENOENT; again: do { ctx->dfs_root_ses = ref_walk_ses(rw); - if (ref_walk_empty(rw)) { - rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1, - NULL, ref_walk_tl(rw)); - if (rc) { - rc = cifs_mount_get_tcon(mnt_ctx); - if (!rc) - rc = cifs_is_path_remote(mnt_ctx); - continue; - } - if (!ref_walk_num_tgts(rw)) { - rc = -ENOENT; - continue; - } - } - while (ref_walk_next_tgt(rw)) { rc = parse_dfs_target(ctx, rw, &tgt); if (rc) @@ -159,32 +150,31 @@ again: if (rc) continue; - ref_walk_set_tgt_hint(rw); - if (tgt.flags & DFSREF_STORAGE_SERVER) { - rc = cifs_mount_get_tcon(mnt_ctx); - if (!rc) - rc = cifs_is_path_remote(mnt_ctx); - if (!rc) + rc = cifs_mount_get_tcon(mnt_ctx); + if (rc) { + if (tgt.server_type == DFS_TYPE_LINK && + DFS_INTERLINK(tgt.flags)) + rc = -EREMOTE; + } else { + rc = cifs_is_path_remote(mnt_ctx); + if (!rc) { + 
ref_walk_set_tgt_hint(rw); break; - if (rc != -EREMOTE) - continue; + } } - - set_root_smb_session(mnt_ctx); - rc = ref_walk_advance(rw); - if (!rc) { - rc = setup_dfs_ref(mnt_ctx, &tgt, rw); + if (rc == -EREMOTE) { + rc = ref_walk_advance(rw); if (!rc) { - rc = -EREMOTE; + rc = setup_dfs_ref(&tgt, rw); + if (rc) + break; + ref_walk_mark_end(rw); goto again; } } - if (rc != -ELOOP) - goto out; } } while (rc && ref_walk_descend(rw)); -out: free_dfs_info_param(&tgt); return rc; } @@ -201,10 +191,10 @@ static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx, return rc; } - ref_walk_init(*rw); - rc = setup_dfs_ref(mnt_ctx, NULL, *rw); + ref_walk_init(*rw, mnt_ctx); + rc = setup_dfs_ref(NULL, *rw); if (!rc) - rc = __dfs_referral_walk(mnt_ctx, *rw); + rc = __dfs_referral_walk(*rw); return rc; } @@ -264,7 +254,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx) int rc = 0; if (!ctx->nodfs && ctx->dfs_automount) { - rc = dns_resolve_server_name_to_ip(ctx->source, addr, NULL); + rc = dns_resolve_unc(NULL, ctx->source, addr); if (!rc) cifs_set_port(addr, ctx->port); ctx->dfs_automount = false; @@ -294,7 +284,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) * to respond with PATH_NOT_COVERED to requests that include the prefix. */ if (!nodfs) { - rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL); + rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL); if (rc) { cifs_dbg(FYI, "%s: no dfs referral for %s: %d\n", __func__, ctx->UNC + 1, rc); @@ -314,10 +304,8 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) cifs_mount_put_conns(mnt_ctx); rc = get_session(mnt_ctx, NULL); } - if (!rc) { - set_root_smb_session(mnt_ctx); + if (!rc) rc = __dfs_mount_share(mnt_ctx); - } return rc; } diff --git a/fs/smb/client/dfs.h b/fs/smb/client/dfs.h index 1aa2bc65b3bc..e60f0a24a8a1 100644 --- a/fs/smb/client/dfs.h +++ b/fs/smb/client/dfs.h @@ -12,6 +12,7 @@ #include "dfs_cache.h" #include "cifs_unicode.h" #include <linux/namei.h> +#include <linux/errno.h> #define DFS_INTERLINK(v) \ (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER)) @@ -25,8 +26,9 @@ struct dfs_ref { }; struct dfs_ref_walk { - struct dfs_ref *ref; - struct dfs_ref refs[MAX_NESTED_LINKS]; + struct cifs_mount_ctx *mnt_ctx; + struct dfs_ref *ref; + struct dfs_ref refs[MAX_NESTED_LINKS]; }; #define ref_walk_start(w) ((w)->refs) @@ -35,7 +37,6 @@ struct dfs_ref_walk { #define ref_walk_descend(w) (--ref_walk_cur(w) >= ref_walk_start(w)) #define ref_walk_tit(w) (ref_walk_cur(w)->tit) -#define ref_walk_empty(w) (!ref_walk_tit(w)) #define ref_walk_path(w) (ref_walk_cur(w)->path) #define ref_walk_fpath(w) (ref_walk_cur(w)->full_path) #define ref_walk_tl(w) (&ref_walk_cur(w)->tl) @@ -51,9 +52,11 @@ static inline struct dfs_ref_walk *ref_walk_alloc(void) return rw; } -static inline void ref_walk_init(struct dfs_ref_walk *rw) +static inline void ref_walk_init(struct dfs_ref_walk *rw, + struct cifs_mount_ctx *mnt_ctx) { memset(rw, 0, sizeof(*rw)); + rw->mnt_ctx = mnt_ctx; ref_walk_cur(rw) = ref_walk_start(rw); } @@ -93,15 +96,23 @@ static inline int ref_walk_advance(struct dfs_ref_walk *rw) static inline struct dfs_cache_tgt_iterator * ref_walk_next_tgt(struct dfs_ref_walk *rw) { - struct dfs_cache_tgt_iterator *tit; struct dfs_ref *ref = ref_walk_cur(rw); + struct dfs_cache_tgt_iterator *tit; + + if (IS_ERR(ref->tit)) + return NULL; if (!ref->tit) tit = dfs_cache_get_tgt_iterator(&ref->tl); else tit = dfs_cache_get_next_tgt(&ref->tl, ref->tit); + + if (!tit) { + ref->tit = ERR_PTR(-ENOENT); + return NULL; + } 
ref->tit = tit; - return tit; + return ref->tit; } static inline int ref_walk_get_tgt(struct dfs_ref_walk *rw, @@ -112,11 +123,6 @@ static inline int ref_walk_get_tgt(struct dfs_ref_walk *rw, ref_walk_tit(rw), tgt); } -static inline int ref_walk_num_tgts(struct dfs_ref_walk *rw) -{ - return dfs_cache_get_nr_tgts(ref_walk_tl(rw)); -} - static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw) { dfs_cache_noreq_update_tgthint(ref_walk_path(rw) + 1, @@ -136,6 +142,15 @@ static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw, } } +static inline void ref_walk_mark_end(struct dfs_ref_walk *rw) +{ + struct dfs_ref *ref = ref_walk_cur(rw) - 1; + + WARN_ON_ONCE(ref < ref_walk_start(rw)); + dfs_cache_noreq_update_tgthint(ref->path + 1, ref->tit); + ref->tit = ERR_PTR(-ENOENT); /* end marker */ +} + int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, struct smb3_fs_context *ctx); int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx); @@ -145,15 +160,16 @@ static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path) return dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb)); } -static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path, - struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl) +static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, + const char *path, + struct dfs_cache_tgt_list *tl) { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; struct cifs_ses *rses = ctx->dfs_root_ses ?: mnt_ctx->ses; return dfs_cache_find(mnt_ctx->xid, rses, cifs_sb->local_nls, - cifs_remap(cifs_sb), path, ref, tl); + cifs_remap(cifs_sb), path, NULL, tl); } /* @@ -172,4 +188,11 @@ static inline void dfs_put_root_smb_sessions(struct list_head *head) } } +static inline const char *dfs_ses_refpath(struct cifs_ses *ses) +{ + const char *path = ses->server->leaf_fullpath; + + return path ? path + 1 : ERR_PTR(-ENOENT); +} + #endif /* _CIFS_DFS_H */ diff --git a/fs/smb/client/dfs_cache.c b/fs/smb/client/dfs_cache.c index 541608b0267e..4dada26d56b5 100644 --- a/fs/smb/client/dfs_cache.c +++ b/fs/smb/client/dfs_cache.c @@ -1096,11 +1096,8 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, static bool target_share_equal(struct cifs_tcon *tcon, const char *s1) { struct TCP_Server_Info *server = tcon->ses->server; - struct sockaddr_storage ss; - const char *host; const char *s2 = &tcon->tree_name[1]; - size_t hostlen; - char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0}; + struct sockaddr_storage ss; bool match; int rc; @@ -1111,18 +1108,13 @@ static bool target_share_equal(struct cifs_tcon *tcon, const char *s1) * Resolve share's hostname and check if server address matches. Otherwise just ignore it * as we could not have upcall to resolve hostname or failed to convert ip address. */ - extract_unc_hostname(s1, &host, &hostlen); - scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host); - - rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL); - if (rc < 0) { - cifs_dbg(FYI, "%s: could not resolve %.*s. 
assuming server address matches.\n", - __func__, (int)hostlen, host); + rc = dns_resolve_unc(server->dns_dom, s1, (struct sockaddr *)&ss); + if (rc < 0) return true; - } cifs_server_lock(server); match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss); + cifs_dbg(FYI, "%s: [share=%s] ipaddr matched: %s\n", __func__, s1, str_yes_no(match)); cifs_server_unlock(server); return match; @@ -1144,35 +1136,19 @@ static bool is_ses_good(struct cifs_ses *ses) return ret; } -static char *get_ses_refpath(struct cifs_ses *ses) -{ - struct TCP_Server_Info *server = ses->server; - char *path = ERR_PTR(-ENOENT); - - mutex_lock(&server->refpath_lock); - if (server->leaf_fullpath) { - path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC); - if (!path) - path = ERR_PTR(-ENOMEM); - } - mutex_unlock(&server->refpath_lock); - return path; -} - /* Refresh dfs referral of @ses */ static void refresh_ses_referral(struct cifs_ses *ses) { struct cache_entry *ce; unsigned int xid; - char *path; + const char *path; int rc = 0; xid = get_xid(); - path = get_ses_refpath(ses); + path = dfs_ses_refpath(ses); if (IS_ERR(path)) { rc = PTR_ERR(path); - path = NULL; goto out; } @@ -1191,7 +1167,6 @@ static void refresh_ses_referral(struct cifs_ses *ses) out: free_xid(xid); - kfree(path); } static int __refresh_tcon_referral(struct cifs_tcon *tcon, @@ -1241,19 +1216,18 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh) struct dfs_info3_param *refs = NULL; struct cache_entry *ce; struct cifs_ses *ses; - unsigned int xid; bool needs_refresh; - char *path; + const char *path; + unsigned int xid; int numrefs = 0; int rc = 0; xid = get_xid(); ses = tcon->ses; - path = get_ses_refpath(ses); + path = dfs_ses_refpath(ses); if (IS_ERR(path)) { rc = PTR_ERR(path); - path = NULL; goto out; } @@ -1281,7 +1255,6 @@ static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh) out: free_xid(xid); - kfree(path); free_dfs_info_array(refs, numrefs); } diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c index 864b194dbaa0..d1e95632ac54 100644 --- a/fs/smb/client/dir.c +++ b/fs/smb/client/dir.c @@ -627,7 +627,7 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode, goto mknod_out; } - trace_smb3_mknod_enter(xid, tcon->ses->Suid, tcon->tid, full_path); + trace_smb3_mknod_enter(xid, tcon->tid, tcon->ses->Suid, full_path); rc = tcon->ses->server->ops->make_node(xid, inode, direntry, tcon, full_path, mode, @@ -635,9 +635,9 @@ int cifs_mknod(struct mnt_idmap *idmap, struct inode *inode, mknod_out: if (rc) - trace_smb3_mknod_err(xid, tcon->ses->Suid, tcon->tid, rc); + trace_smb3_mknod_err(xid, tcon->tid, tcon->ses->Suid, rc); else - trace_smb3_mknod_done(xid, tcon->ses->Suid, tcon->tid); + trace_smb3_mknod_done(xid, tcon->tid, tcon->ses->Suid); free_dentry_path(page); free_xid(xid); @@ -737,7 +737,8 @@ again: } static int -cifs_d_revalidate(struct dentry *direntry, unsigned int flags) +cifs_d_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *direntry, unsigned int flags) { struct inode *inode; int rc; diff --git a/fs/smb/client/dns_resolve.c b/fs/smb/client/dns_resolve.c index 8bf8978bc5d6..de7f4b384718 100644 --- a/fs/smb/client/dns_resolve.c +++ b/fs/smb/client/dns_resolve.c @@ -20,69 +20,77 @@ #include "cifsproto.h" #include "cifs_debug.h" -/** - * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. - * @unc: UNC path specifying the server (with '/' as delimiter) - * @ip_addr: Where to return the IP address. 
- * @expiry: Where to return the expiry time for the dns record. - * - * Returns zero success, -ve on error. - */ -int -dns_resolve_server_name_to_ip(const char *unc, struct sockaddr *ip_addr, time64_t *expiry) +static int resolve_name(const char *name, size_t namelen, struct sockaddr *addr) { - const char *hostname, *sep; char *ip; - int len, rc; + int rc; - if (!ip_addr || !unc) - return -EINVAL; + rc = dns_query(current->nsproxy->net_ns, NULL, name, + namelen, NULL, &ip, NULL, false); + if (rc < 0) { + cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n", + __func__, (int)namelen, (int)namelen, name); + } else { + cifs_dbg(FYI, "%s: resolved: %*.*s to %s\n", + __func__, (int)namelen, (int)namelen, name, ip); - len = strlen(unc); - if (len < 3) { - cifs_dbg(FYI, "%s: unc is too short: %s\n", __func__, unc); - return -EINVAL; + rc = cifs_convert_address(addr, ip, strlen(ip)); + kfree(ip); + if (!rc) { + cifs_dbg(FYI, "%s: unable to determine ip address\n", + __func__); + rc = -EHOSTUNREACH; + } else { + rc = 0; + } } + return rc; +} - /* Discount leading slashes for cifs */ - len -= 2; - hostname = unc + 2; +/** + * dns_resolve_name - Perform an upcall to resolve hostname to an ip address. + * @dom: DNS domain name (or NULL) + * @name: Name to look up + * @namelen: Length of name + * @ip_addr: Where to return the IP address + * + * Returns zero on success, -ve code otherwise. + */ +int dns_resolve_name(const char *dom, const char *name, + size_t namelen, struct sockaddr *ip_addr) +{ + size_t len; + char *s; + int rc; - /* Search for server name delimiter */ - sep = memchr(hostname, '/', len); - if (sep) - len = sep - hostname; - else - cifs_dbg(FYI, "%s: probably server name is whole unc: %s\n", - __func__, unc); + cifs_dbg(FYI, "%s: dom=%s name=%.*s\n", __func__, dom, (int)namelen, name); + if (!ip_addr || !name || !*name || !namelen) + return -EINVAL; + cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)namelen, name); /* Try to interpret hostname as an IPv4 or IPv6 address */ - rc = cifs_convert_address(ip_addr, hostname, len); + rc = cifs_convert_address(ip_addr, name, namelen); if (rc > 0) { - cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %*.*s\n", __func__, len, len, - hostname); + cifs_dbg(FYI, "%s: unc is IP, skipping dns upcall: %*.*s\n", + __func__, (int)namelen, (int)namelen, name); return 0; } - /* Perform the upcall */ - rc = dns_query(current->nsproxy->net_ns, NULL, hostname, len, - NULL, &ip, expiry, false); - if (rc < 0) { - cifs_dbg(FYI, "%s: unable to resolve: %*.*s\n", - __func__, len, len, hostname); - } else { - cifs_dbg(FYI, "%s: resolved: %*.*s to %s expiry %llu\n", - __func__, len, len, hostname, ip, - expiry ? (*expiry) : 0); - - rc = cifs_convert_address(ip_addr, ip, strlen(ip)); - kfree(ip); + /* + * If @name contains a NetBIOS name and @dom has been specified, then + * convert @name to an FQDN and try resolving it first. 
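The lookup order used here (IP literal first, then the domain-qualified name for short NetBIOS-style hosts, then the bare name) can be sketched in userspace C. This is an illustration only: getaddrinfo() stands in for the kernel dns_query() upcall, and the NetBIOS-name check is a simplified assumption rather than the client's cifs_netbios_name() helper.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>

/* Simplified stand-in for cifs_netbios_name(): short, dot-free names. */
static bool is_netbios_name(const char *name)
{
	return strlen(name) <= 15 && strchr(name, '.') == NULL;
}

static int try_resolve(const char *host, int flags, struct sockaddr_storage *out)
{
	struct addrinfo hints = { .ai_flags = flags, .ai_family = AF_UNSPEC };
	struct addrinfo *res;

	if (getaddrinfo(host, NULL, &hints, &res) != 0)
		return -1;
	memcpy(out, res->ai_addr, res->ai_addrlen);
	freeaddrinfo(res);
	return 0;
}

/* Resolve @name, optionally qualified with DNS domain @dom. */
static int resolve_host(const char *dom, const char *name, struct sockaddr_storage *out)
{
	char fqdn[512];

	/* 1. Name may already be an IPv4/IPv6 literal: no lookup needed. */
	if (try_resolve(name, AI_NUMERICHOST, out) == 0)
		return 0;
	/* 2. Short NetBIOS-style name plus a known domain: try the FQDN first. */
	if (dom && *dom && is_netbios_name(name)) {
		snprintf(fqdn, sizeof(fqdn), "%s.%s", name, dom);
		if (try_resolve(fqdn, 0, out) == 0)
			return 0;
	}
	/* 3. Fall back to resolving the name exactly as given. */
	return try_resolve(name, 0, out);
}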
+ */ + if (dom && *dom && cifs_netbios_name(name, namelen)) { + len = strnlen(dom, CIFS_MAX_DOMAINNAME_LEN) + namelen + 2; + s = kmalloc(len, GFP_KERNEL); + if (!s) + return -ENOMEM; - if (!rc) { - cifs_dbg(FYI, "%s: unable to determine ip address\n", __func__); - rc = -EHOSTUNREACH; - } else - rc = 0; + scnprintf(s, len, "%.*s.%s", (int)namelen, name, dom); + rc = resolve_name(s, len - 1, ip_addr); + kfree(s); + if (!rc) + return 0; } - return rc; + return resolve_name(name, namelen, ip_addr); } diff --git a/fs/smb/client/dns_resolve.h b/fs/smb/client/dns_resolve.h index 6eb0c15a2440..0dc706f2c422 100644 --- a/fs/smb/client/dns_resolve.h +++ b/fs/smb/client/dns_resolve.h @@ -12,9 +12,30 @@ #define _DNS_RESOLVE_H #include <linux/net.h> +#include "cifsglob.h" +#include "cifsproto.h" #ifdef __KERNEL__ -int dns_resolve_server_name_to_ip(const char *unc, struct sockaddr *ip_addr, time64_t *expiry); + +int dns_resolve_name(const char *dom, const char *name, + size_t namelen, struct sockaddr *ip_addr); + +static inline int dns_resolve_unc(const char *dom, const char *unc, + struct sockaddr *ip_addr) +{ + const char *name; + size_t namelen; + + if (!unc || strlen(unc) < 3) + return -EINVAL; + + extract_unc_hostname(unc, &name, &namelen); + if (!namelen) + return -EINVAL; + + return dns_resolve_name(dom, name, namelen, ip_addr); +} + #endif /* KERNEL */ #endif /* _DNS_RESOLVE_H */ diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 3b2d33291a7e..79de2f2f9c41 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -227,7 +227,8 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq) return; failed: - netfs_read_subreq_terminated(subreq, rc, false); + subreq->error = rc; + netfs_read_subreq_terminated(subreq); } /* diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index 49123f458d0c..e9b286d9a7ba 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -133,6 +133,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = { fsparam_flag("rootfs", Opt_rootfs), fsparam_flag("compress", Opt_compress), fsparam_flag("witness", Opt_witness), + fsparam_flag_no("nativesocket", Opt_nativesocket), /* Mount options which take uid or gid */ fsparam_uid("backupuid", Opt_backupuid), @@ -185,6 +186,8 @@ const struct fs_parameter_spec smb3_fs_parameters[] = { fsparam_string("cache", Opt_cache), fsparam_string("reparse", Opt_reparse), fsparam_string("upcall_target", Opt_upcalltarget), + fsparam_string("symlink", Opt_symlink), + fsparam_string("symlinkroot", Opt_symlinkroot), /* Arguments that should be ignored */ fsparam_flag("guest", Opt_ignore), @@ -332,6 +335,7 @@ cifs_parse_cache_flavor(struct fs_context *fc, char *value, struct smb3_fs_conte static const match_table_t reparse_flavor_tokens = { { Opt_reparse_default, "default" }, + { Opt_reparse_none, "none" }, { Opt_reparse_nfs, "nfs" }, { Opt_reparse_wsl, "wsl" }, { Opt_reparse_err, NULL }, @@ -346,6 +350,9 @@ static int parse_reparse_flavor(struct fs_context *fc, char *value, case Opt_reparse_default: ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT; break; + case Opt_reparse_none: + ctx->reparse_type = CIFS_REPARSE_TYPE_NONE; + break; case Opt_reparse_nfs: ctx->reparse_type = CIFS_REPARSE_TYPE_NFS; break; @@ -359,6 +366,55 @@ static int parse_reparse_flavor(struct fs_context *fc, char *value, return 0; } +static const match_table_t symlink_flavor_tokens = { + { Opt_symlink_default, "default" }, + { Opt_symlink_none, "none" }, + { Opt_symlink_native, "native" }, + { Opt_symlink_unix, "unix" }, + 
{ Opt_symlink_mfsymlinks, "mfsymlinks" }, + { Opt_symlink_sfu, "sfu" }, + { Opt_symlink_nfs, "nfs" }, + { Opt_symlink_wsl, "wsl" }, + { Opt_symlink_err, NULL }, +}; + +static int parse_symlink_flavor(struct fs_context *fc, char *value, + struct smb3_fs_context *ctx) +{ + substring_t args[MAX_OPT_ARGS]; + + switch (match_token(value, symlink_flavor_tokens, args)) { + case Opt_symlink_default: + ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT; + break; + case Opt_symlink_none: + ctx->symlink_type = CIFS_SYMLINK_TYPE_NONE; + break; + case Opt_symlink_native: + ctx->symlink_type = CIFS_SYMLINK_TYPE_NATIVE; + break; + case Opt_symlink_unix: + ctx->symlink_type = CIFS_SYMLINK_TYPE_UNIX; + break; + case Opt_symlink_mfsymlinks: + ctx->symlink_type = CIFS_SYMLINK_TYPE_MFSYMLINKS; + break; + case Opt_symlink_sfu: + ctx->symlink_type = CIFS_SYMLINK_TYPE_SFU; + break; + case Opt_symlink_nfs: + ctx->symlink_type = CIFS_SYMLINK_TYPE_NFS; + break; + case Opt_symlink_wsl: + ctx->symlink_type = CIFS_SYMLINK_TYPE_WSL; + break; + default: + cifs_errorf(fc, "bad symlink= option: %s\n", value); + return 1; + } + return 0; +} + #define DUP_CTX_STR(field) \ do { \ if (ctx->field) { \ @@ -385,6 +441,8 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx new_ctx->source = NULL; new_ctx->iocharset = NULL; new_ctx->leaf_fullpath = NULL; + new_ctx->dns_dom = NULL; + new_ctx->symlinkroot = NULL; /* * Make sure to stay in sync with smb3_cleanup_fs_context_contents() */ @@ -399,6 +457,8 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx DUP_CTX_STR(nodename); DUP_CTX_STR(iocharset); DUP_CTX_STR(leaf_fullpath); + DUP_CTX_STR(dns_dom); + DUP_CTX_STR(symlinkroot); return 0; } @@ -1725,6 +1785,23 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, if (parse_reparse_flavor(fc, param->string, ctx)) goto cifs_parse_mount_err; break; + case Opt_nativesocket: + ctx->nonativesocket = result.negated; + break; + case Opt_symlink: + if (parse_symlink_flavor(fc, param->string, ctx)) + goto cifs_parse_mount_err; + break; + case Opt_symlinkroot: + if (param->string[0] != '/') { + cifs_errorf(fc, "symlinkroot mount options must be absolute path\n"); + goto cifs_parse_mount_err; + } + kfree(ctx->symlinkroot); + ctx->symlinkroot = kstrdup(param->string, GFP_KERNEL); + if (!ctx->symlinkroot) + goto cifs_parse_mount_err; + break; } /* case Opt_ignore: - is ignored as expected ... */ @@ -1733,6 +1810,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, goto cifs_parse_mount_err; } + /* + * By default resolve all native absolute symlinks relative to "/mnt/". + * Same default has drvfs driver running in WSL for resolving SMB shares. 
+ */ + if (!ctx->symlinkroot) + ctx->symlinkroot = kstrdup("/mnt/", GFP_KERNEL); + return 0; cifs_parse_mount_err: @@ -1743,6 +1827,24 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, return -EINVAL; } +enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb) +{ + if (cifs_sb->ctx->symlink_type == CIFS_SYMLINK_TYPE_DEFAULT) { + if (cifs_sb->ctx->mfsymlinks) + return CIFS_SYMLINK_TYPE_MFSYMLINKS; + else if (cifs_sb->ctx->sfu_emul) + return CIFS_SYMLINK_TYPE_SFU; + else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext) + return CIFS_SYMLINK_TYPE_UNIX; + else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE) + return CIFS_SYMLINK_TYPE_NATIVE; + else + return CIFS_SYMLINK_TYPE_NONE; + } else { + return cifs_sb->ctx->symlink_type; + } +} + int smb3_init_fs_context(struct fs_context *fc) { struct smb3_fs_context *ctx; @@ -1819,6 +1921,8 @@ int smb3_init_fs_context(struct fs_context *fc) ctx->retrans = 1; ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT; + ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT; + ctx->nonativesocket = 0; /* * short int override_uid = -1; @@ -1863,6 +1967,10 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx) ctx->prepath = NULL; kfree(ctx->leaf_fullpath); ctx->leaf_fullpath = NULL; + kfree(ctx->dns_dom); + ctx->dns_dom = NULL; + kfree(ctx->symlinkroot); + ctx->symlinkroot = NULL; } void diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h index ac6baa774ad3..881bfc08667e 100644 --- a/fs/smb/client/fs_context.h +++ b/fs/smb/client/fs_context.h @@ -43,11 +43,24 @@ enum { enum cifs_reparse_parm { Opt_reparse_default, + Opt_reparse_none, Opt_reparse_nfs, Opt_reparse_wsl, Opt_reparse_err }; +enum cifs_symlink_parm { + Opt_symlink_default, + Opt_symlink_none, + Opt_symlink_native, + Opt_symlink_unix, + Opt_symlink_mfsymlinks, + Opt_symlink_sfu, + Opt_symlink_nfs, + Opt_symlink_wsl, + Opt_symlink_err +}; + enum cifs_sec_param { Opt_sec_krb5, Opt_sec_krb5i, @@ -166,6 +179,9 @@ enum cifs_param { Opt_cache, Opt_reparse, Opt_upcalltarget, + Opt_nativesocket, + Opt_symlink, + Opt_symlinkroot, /* Mount options to be ignored */ Opt_ignore, @@ -294,11 +310,17 @@ struct smb3_fs_context { struct cifs_ses *dfs_root_ses; bool dfs_automount:1; /* set for dfs automount only */ enum cifs_reparse_type reparse_type; + enum cifs_symlink_type symlink_type; + bool nonativesocket:1; bool dfs_conn:1; /* set for dfs mounts */ + char *dns_dom; + char *symlinkroot; /* top level directory for native SMB symlinks in absolute format */ }; extern const struct fs_parameter_spec smb3_fs_parameters[]; +extern enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb); + extern int smb3_init_fs_context(struct fs_context *fc); extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx); extern void smb3_cleanup_fs_context(struct smb3_fs_context *ctx); diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index f146e06c97eb..9cc31cf6ebd0 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -990,7 +990,7 @@ cifs_get_file_info(struct file *filp) /* TODO: add support to query reparse tag */ data.adjust_tz = false; if (data.symlink_target) { - data.symlink = true; + data.reparse_point = true; data.reparse.tag = IO_REPARSE_TAG_SYMLINK; } path = build_path_from_dentry(dentry, page); @@ -1216,6 +1216,11 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data, full_path, iov, data); } + + if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) { + bool directory = 
le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY; + rc = smb2_fix_symlink_target_type(&data->symlink_target, directory, cifs_sb); + } break; } @@ -2392,6 +2397,13 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, if (to_dentry->d_parent != from_dentry->d_parent) goto do_rename_exit; + /* + * CIFSSMBRenameOpenFile() uses SMB_SET_FILE_RENAME_INFORMATION + * which is SMB PASSTHROUGH level. + */ + if (!(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)) + goto do_rename_exit; + oparms = (struct cifs_open_parms) { .tcon = tcon, .cifs_sb = cifs_sb, diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c index 47ddeb7fa111..6e6c09cc5ce7 100644 --- a/fs/smb/client/link.c +++ b/fs/smb/client/link.c @@ -18,6 +18,7 @@ #include "cifs_unicode.h" #include "smb2proto.h" #include "cifs_ioctl.h" +#include "fs_context.h" /* * M-F Symlink Functions - Begin @@ -604,22 +605,53 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode, cifs_dbg(FYI, "symname is %s\n", symname); /* BB what if DFS and this volume is on different share? BB */ - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { - rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); - } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { - rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon, - full_path, S_IFLNK, 0, symname); + rc = -EOPNOTSUPP; + switch (get_cifs_symlink_type(cifs_sb)) { + case CIFS_SYMLINK_TYPE_DEFAULT: + /* should not happen, get_cifs_symlink_type() resolves the default */ + break; + + case CIFS_SYMLINK_TYPE_NONE: + break; + + case CIFS_SYMLINK_TYPE_UNIX: #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY - } else if (pTcon->unix_ext) { - rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, - cifs_sb->local_nls, - cifs_remap(cifs_sb)); + if (pTcon->unix_ext) { + rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, + symname, + cifs_sb->local_nls, + cifs_remap(cifs_sb)); + } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ - } else if (server->ops->create_reparse_symlink) { - rc = server->ops->create_reparse_symlink(xid, inode, direntry, - pTcon, full_path, - symname); - goto symlink_exit; + break; + + case CIFS_SYMLINK_TYPE_MFSYMLINKS: + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) { + rc = create_mf_symlink(xid, pTcon, cifs_sb, + full_path, symname); + } + break; + + case CIFS_SYMLINK_TYPE_SFU: + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { + rc = __cifs_sfu_make_node(xid, inode, direntry, pTcon, + full_path, S_IFLNK, + 0, symname); + } + break; + + case CIFS_SYMLINK_TYPE_NATIVE: + case CIFS_SYMLINK_TYPE_NFS: + case CIFS_SYMLINK_TYPE_WSL: + if (server->ops->create_reparse_symlink) { + rc = server->ops->create_reparse_symlink(xid, inode, + direntry, + pTcon, + full_path, + symname); + goto symlink_exit; + } + break; } if (rc == 0) { diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c index 4373dd64b66d..b328dc5c7988 100644 --- a/fs/smb/client/misc.c +++ b/fs/smb/client/misc.c @@ -101,6 +101,7 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree_sensitive(buf_to_free->password2); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); + kfree(buf_to_free->dns_dom); kfree_sensitive(buf_to_free->auth_key.response); spin_lock(&buf_to_free->iface_lock); list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list, @@ -908,9 +909,9 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals); if (*num_of_nodes < 1) { - cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n", 
- *num_of_nodes); - rc = -EINVAL; + cifs_dbg(VFS | ONCE, "%s: [path=%s] num_referrals must be at least > 0, but we got %d\n", + __func__, searchName, *num_of_nodes); + rc = -ENOENT; goto parse_DFS_referrals_exit; } @@ -1171,33 +1172,25 @@ void cifs_put_tcp_super(struct super_block *sb) #ifdef CONFIG_CIFS_DFS_UPCALL int match_target_ip(struct TCP_Server_Info *server, - const char *share, size_t share_len, + const char *host, size_t hostlen, bool *result) { - int rc; - char *target; struct sockaddr_storage ss; + int rc; - *result = false; - - target = kzalloc(share_len + 3, GFP_KERNEL); - if (!target) - return -ENOMEM; - - scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share); - - cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2); + cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)hostlen, host); - rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL); - kfree(target); + *result = false; + rc = dns_resolve_name(server->dns_dom, host, hostlen, + (struct sockaddr *)&ss); if (rc < 0) return rc; spin_lock(&server->srv_lock); *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss); spin_unlock(&server->srv_lock); - cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result); + cifs_dbg(FYI, "%s: ip addresses matched: %s\n", __func__, str_yes_no(*result)); return 0; } diff --git a/fs/smb/client/netmisc.c b/fs/smb/client/netmisc.c index 2a8d71221e5e..9ec20601cee2 100644 --- a/fs/smb/client/netmisc.c +++ b/fs/smb/client/netmisc.c @@ -313,7 +313,6 @@ static const struct { ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, { - ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACCOUNT_NAME}, { ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS}, /* { This NT error code was 'sqashed' @@ -775,10 +774,10 @@ cifs_print_status(__u32 status_code) int idx = 0; while (nt_errs[idx].nt_errstr != NULL) { - if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) == - (status_code & 0xFFFFFF)) { + if (nt_errs[idx].nt_errcode == status_code) { pr_notice("Status code returned 0x%08x %s\n", status_code, nt_errs[idx].nt_errstr); + return; } idx++; } @@ -871,6 +870,15 @@ map_smb_to_linux_error(char *buf, bool logErr) } /* else ERRHRD class errors or junk - return EIO */ + /* special cases for NT status codes which cannot be translated to DOS codes */ + if (smb->Flags2 & SMBFLG2_ERR_STATUS) { + __u32 err = le32_to_cpu(smb->Status.CifsError); + if (err == (NT_STATUS_NOT_A_REPARSE_POINT)) + rc = -ENODATA; + else if (err == (NT_STATUS_PRIVILEGE_NOT_HELD)) + rc = -EPERM; + } + cifs_dbg(FYI, "Mapping smb error code 0x%x to POSIX err %d\n", le32_to_cpu(smb->Status.CifsError), rc); diff --git a/fs/smb/client/nterr.c b/fs/smb/client/nterr.c index 358a766375b4..8f0bc441295e 100644 --- a/fs/smb/client/nterr.c +++ b/fs/smb/client/nterr.c @@ -13,6 +13,13 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_OK", NT_STATUS_OK}, + {"NT_STATUS_MEDIA_CHANGED", NT_STATUS_MEDIA_CHANGED}, + {"NT_STATUS_END_OF_MEDIA", NT_STATUS_END_OF_MEDIA}, + {"NT_STATUS_MEDIA_CHECK", NT_STATUS_MEDIA_CHECK}, + {"NT_STATUS_NO_DATA_DETECTED", NT_STATUS_NO_DATA_DETECTED}, + {"NT_STATUS_STOPPED_ON_SYMLINK", NT_STATUS_STOPPED_ON_SYMLINK}, + {"NT_STATUS_DEVICE_REQUIRES_CLEANING", NT_STATUS_DEVICE_REQUIRES_CLEANING}, + {"NT_STATUS_DEVICE_DOOR_OPEN", NT_STATUS_DEVICE_DOOR_OPEN}, {"NT_STATUS_UNSUCCESSFUL", NT_STATUS_UNSUCCESSFUL}, 
{"NT_STATUS_NOT_IMPLEMENTED", NT_STATUS_NOT_IMPLEMENTED}, {"NT_STATUS_INVALID_INFO_CLASS", NT_STATUS_INVALID_INFO_CLASS}, @@ -667,8 +674,10 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_QUOTA_LIST_INCONSISTENT", NT_STATUS_QUOTA_LIST_INCONSISTENT}, {"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE}, + {"NT_STATUS_NOT_A_REPARSE_POINT", NT_STATUS_NOT_A_REPARSE_POINT}, {"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES}, {"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES}, {"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED}, + {"NT_STATUS_NO_SUCH_JOB", NT_STATUS_NO_SUCH_JOB}, {NULL, 0} }; diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h index edd4741cab0a..180602c22355 100644 --- a/fs/smb/client/nterr.h +++ b/fs/smb/client/nterr.h @@ -546,6 +546,7 @@ extern const struct nt_err_code_struct nt_errs[]; #define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265 #define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266 #define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267 +#define NT_STATUS_NOT_A_REPARSE_POINT 0xC0000000 | 0x0275 #define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE /* scheduler */ #endif /* _NTERR_H */ diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c index 273358d20a46..50f96259d9ad 100644 --- a/fs/smb/client/readdir.c +++ b/fs/smb/client/readdir.c @@ -413,7 +413,7 @@ ffirst_retry: cifsFile->invalidHandle = false; } else if ((rc == -EOPNOTSUPP) && (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)) { - cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM; + cifs_autodisable_serverino(cifs_sb); goto ffirst_retry; } error_exit: diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c index d88b41133e00..0a5a52a8a7dd 100644 --- a/fs/smb/client/reparse.c +++ b/fs/smb/client/reparse.c @@ -14,6 +14,20 @@ #include "fs_context.h" #include "reparse.h" +static int mknod_nfs(unsigned int xid, struct inode *inode, + struct dentry *dentry, struct cifs_tcon *tcon, + const char *full_path, umode_t mode, dev_t dev, + const char *symname); + +static int mknod_wsl(unsigned int xid, struct inode *inode, + struct dentry *dentry, struct cifs_tcon *tcon, + const char *full_path, umode_t mode, dev_t dev, + const char *symname); + +static int create_native_symlink(const unsigned int xid, struct inode *inode, + struct dentry *dentry, struct cifs_tcon *tcon, + const char *full_path, const char *symname); + static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb, const unsigned int xid, const char *full_path, @@ -24,37 +38,148 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, const char *full_path, const char *symname) { + switch (get_cifs_symlink_type(CIFS_SB(inode->i_sb))) { + case CIFS_SYMLINK_TYPE_NATIVE: + return create_native_symlink(xid, inode, dentry, tcon, full_path, symname); + case CIFS_SYMLINK_TYPE_NFS: + return mknod_nfs(xid, inode, dentry, tcon, full_path, S_IFLNK, 0, symname); + case CIFS_SYMLINK_TYPE_WSL: + return mknod_wsl(xid, inode, dentry, tcon, full_path, S_IFLNK, 0, symname); + default: + return -EOPNOTSUPP; + } +} + +static int create_native_symlink(const unsigned int xid, struct inode *inode, + struct dentry *dentry, struct cifs_tcon *tcon, + const char *full_path, const char *symname) +{ struct reparse_symlink_data_buffer *buf = NULL; - struct cifs_open_info_data data; + struct cifs_open_info_data data = {}; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct inode *new; struct kvec iov; - __le16 *path; + __le16 *path = NULL; bool 
directory; - char *sym, sep = CIFS_DIR_SEP(cifs_sb); - u16 len, plen; + char *symlink_target = NULL; + char *sym = NULL; + char sep = CIFS_DIR_SEP(cifs_sb); + u16 len, plen, poff, slen; int rc = 0; if (strlen(symname) > REPARSE_SYM_PATH_MAX) return -ENAMETOOLONG; - sym = kstrdup(symname, GFP_KERNEL); - if (!sym) - return -ENOMEM; + symlink_target = kstrdup(symname, GFP_KERNEL); + if (!symlink_target) { + rc = -ENOMEM; + goto out; + } data = (struct cifs_open_info_data) { .reparse_point = true, .reparse = { .tag = IO_REPARSE_TAG_SYMLINK, }, - .symlink_target = sym, + .symlink_target = symlink_target, }; - convert_delimiter(sym, sep); + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { + /* + * This is a request to create an absolute symlink on the server + * which does not support POSIX paths, and expects symlink in + * NT-style path. So convert absolute Linux symlink target path + * to the absolute NT-style path. Root of the NT-style path for + * symlinks is specified in "symlinkroot" mount option. This will + * ensure compatibility of this symlink stored in absolute form + * on the SMB server. + */ + if (!strstarts(symname, cifs_sb->ctx->symlinkroot)) { + /* + * If the absolute Linux symlink target path is not + * inside "symlinkroot" location then there is no way + * to convert such Linux symlink to NT-style path. + */ + cifs_dbg(VFS, + "absolute symlink '%s' cannot be converted to NT format " + "because it is outside of symlinkroot='%s'\n", + symname, cifs_sb->ctx->symlinkroot); + rc = -EINVAL; + goto out; + } + len = strlen(cifs_sb->ctx->symlinkroot); + if (cifs_sb->ctx->symlinkroot[len-1] != '/') + len++; + if (symname[len] >= 'a' && symname[len] <= 'z' && + (symname[len+1] == '/' || symname[len+1] == '\0')) { + /* + * Symlink points to Linux target /symlinkroot/x/path/... + * where 'x' is the lowercase local Windows drive. + * NT-style path for 'x' has common form \??\X:\path\... + * with uppercase local Windows drive. + */ + int common_path_len = strlen(symname+len+1)+1; + sym = kzalloc(6+common_path_len, GFP_KERNEL); + if (!sym) { + rc = -ENOMEM; + goto out; + } + memcpy(sym, "\\??\\", 4); + sym[4] = symname[len] - ('a'-'A'); + sym[5] = ':'; + memcpy(sym+6, symname+len+1, common_path_len); + } else { + /* Unhandled absolute symlink. Report an error. */ + cifs_dbg( + VFS, + "absolute symlink '%s' cannot be converted to NT format " + "because it points to unknown target\n", + symname); + rc = -EINVAL; + goto out; + } + } else { + /* + * This is request to either create an absolute symlink on + * server which expects POSIX paths or it is an request to + * create a relative symlink from the current directory. + * These paths have same format as relative SMB symlinks, + * so no conversion is needed. So just take symname as-is. + */ + sym = kstrdup(symname, GFP_KERNEL); + if (!sym) { + rc = -ENOMEM; + goto out; + } + } + + if (sep == '\\') + convert_delimiter(sym, sep); + + /* + * For absolute NT symlinks it is required to pass also leading + * backslash and to not mangle NT object prefix "\\??\\" and not to + * mangle colon in drive letter. But cifs_convert_path_to_utf16() + * removes leading backslash and replaces '?' and ':'. So temporary + * mask these characters in NT object prefix by '_' and then change + * them back. 
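The drive-letter rewrite described in the comment above is easier to follow as a standalone sketch. A minimal userspace illustration, assuming symlinkroot="/mnt" (no trailing slash) and plain char strings in place of the UTF-16 path the client actually builds:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Map an absolute Linux target under @root to an NT-style path, the
 * direction used when creating an absolute native symlink:
 * "/mnt/c/foo/bar" -> "\??\C:\foo\bar".
 */
static char *linux_to_nt(const char *root, const char *target)
{
	size_t rlen = strlen(root);
	char *nt, *p;

	if (strncmp(target, root, rlen) != 0 || target[rlen] != '/')
		return NULL;			/* target is outside symlinkroot */

	target += rlen + 1;			/* e.g. "c/foo/bar" */
	if (!islower((unsigned char)target[0]) ||
	    (target[1] != '/' && target[1] != '\0'))
		return NULL;			/* no lowercase drive-letter component */

	nt = malloc(strlen(target) + 8);	/* "\??\" + "X:" + rest + NUL */
	if (!nt)
		return NULL;
	sprintf(nt, "\\??\\%c:%s", toupper((unsigned char)target[0]), target + 1);
	for (p = nt + 4; *p; p++)		/* keep the "\??\" prefix untouched */
		if (*p == '/')
			*p = '\\';
	return nt;
}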
+ */ + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') + sym[0] = sym[1] = sym[2] = sym[5] = '_'; + path = cifs_convert_path_to_utf16(sym, cifs_sb); if (!path) { rc = -ENOMEM; goto out; } + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { + sym[0] = '\\'; + sym[1] = sym[2] = '?'; + sym[5] = ':'; + path[0] = cpu_to_le16('\\'); + path[1] = path[2] = cpu_to_le16('?'); + path[5] = cpu_to_le16(':'); + } + /* * SMB distinguish between symlink to directory and symlink to file. * They cannot be exchanged (symlink of file type which points to @@ -67,8 +192,18 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode, if (rc < 0) goto out; - plen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX); - len = sizeof(*buf) + plen * 2; + slen = 2 * UniStrnlen((wchar_t *)path, REPARSE_SYM_PATH_MAX); + poff = 0; + plen = slen; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') { + /* + * For absolute NT symlinks skip leading "\\??\\" in PrintName as + * PrintName is user visible location in DOS/Win32 format (not in NT format). + */ + poff = 4; + plen -= 2 * poff; + } + len = sizeof(*buf) + plen + slen; buf = kzalloc(len, GFP_KERNEL); if (!buf) { rc = -ENOMEM; @@ -77,17 +212,17 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode, buf->ReparseTag = cpu_to_le32(IO_REPARSE_TAG_SYMLINK); buf->ReparseDataLength = cpu_to_le16(len - sizeof(struct reparse_data_buffer)); + buf->SubstituteNameOffset = cpu_to_le16(plen); - buf->SubstituteNameLength = cpu_to_le16(plen); - memcpy(&buf->PathBuffer[plen], path, plen); + buf->SubstituteNameLength = cpu_to_le16(slen); + memcpy(&buf->PathBuffer[plen], path, slen); + buf->PrintNameOffset = 0; buf->PrintNameLength = cpu_to_le16(plen); - memcpy(buf->PathBuffer, path, plen); + memcpy(buf->PathBuffer, path+poff, plen); + buf->Flags = cpu_to_le32(*symname != '/' ? 
SYMLINK_FLAG_RELATIVE : 0); - if (*sym != sep) - buf->Flags = cpu_to_le32(SYMLINK_FLAG_RELATIVE); - convert_delimiter(sym, '/'); iov.iov_base = buf; iov.iov_len = len; new = smb2_get_reparse_inode(&data, inode->i_sb, xid, @@ -98,6 +233,7 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode, else rc = PTR_ERR(new); out: + kfree(sym); kfree(path); cifs_free_open_info(&data); kfree(buf); @@ -242,8 +378,39 @@ static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb, return 0; } -static int nfs_set_reparse_buf(struct reparse_posix_data *buf, +static int create_native_socket(const unsigned int xid, struct inode *inode, + struct dentry *dentry, struct cifs_tcon *tcon, + const char *full_path) +{ + struct reparse_data_buffer buf = { + .ReparseTag = cpu_to_le32(IO_REPARSE_TAG_AF_UNIX), + .ReparseDataLength = cpu_to_le16(0), + }; + struct cifs_open_info_data data = { + .reparse_point = true, + .reparse = { .tag = IO_REPARSE_TAG_AF_UNIX, .buf = &buf, }, + }; + struct kvec iov = { + .iov_base = &buf, + .iov_len = sizeof(buf), + }; + struct inode *new; + int rc = 0; + + new = smb2_get_reparse_inode(&data, inode->i_sb, xid, + tcon, full_path, false, &iov, NULL); + if (!IS_ERR(new)) + d_instantiate(dentry, new); + else + rc = PTR_ERR(new); + cifs_free_open_info(&data); + return rc; +} + +static int nfs_set_reparse_buf(struct reparse_nfs_data_buffer *buf, mode_t mode, dev_t dev, + __le16 *symname_utf16, + int symname_utf16_len, struct kvec *iov) { u64 type; @@ -254,7 +421,13 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf, switch ((type = reparse_mode_nfs_type(mode))) { case NFS_SPECFILE_BLK: case NFS_SPECFILE_CHR: - dlen = sizeof(__le64); + dlen = 2 * sizeof(__le32); + ((__le32 *)buf->DataBuffer)[0] = cpu_to_le32(MAJOR(dev)); + ((__le32 *)buf->DataBuffer)[1] = cpu_to_le32(MINOR(dev)); + break; + case NFS_SPECFILE_LNK: + dlen = symname_utf16_len; + memcpy(buf->DataBuffer, symname_utf16, symname_utf16_len); break; case NFS_SPECFILE_FIFO: case NFS_SPECFILE_SOCK: @@ -269,8 +442,6 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf, buf->InodeType = cpu_to_le64(type); buf->ReparseDataLength = cpu_to_le16(len + dlen - sizeof(struct reparse_data_buffer)); - *(__le64 *)buf->DataBuffer = cpu_to_le64(((u64)MINOR(dev) << 32) | - MAJOR(dev)); iov->iov_base = buf; iov->iov_len = len + dlen; return 0; @@ -278,23 +449,45 @@ static int nfs_set_reparse_buf(struct reparse_posix_data *buf, static int mknod_nfs(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, - const char *full_path, umode_t mode, dev_t dev) + const char *full_path, umode_t mode, dev_t dev, + const char *symname) { + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_open_info_data data; - struct reparse_posix_data *p; + struct reparse_nfs_data_buffer *p = NULL; + __le16 *symname_utf16 = NULL; + int symname_utf16_len = 0; struct inode *new; struct kvec iov; __u8 buf[sizeof(*p) + sizeof(__le64)]; int rc; - p = (struct reparse_posix_data *)buf; - rc = nfs_set_reparse_buf(p, mode, dev, &iov); + if (S_ISLNK(mode)) { + symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname), + &symname_utf16_len, + cifs_sb->local_nls, + NO_MAP_UNI_RSVD); + if (!symname_utf16) { + rc = -ENOMEM; + goto out; + } + symname_utf16_len -= 2; /* symlink is without trailing wide-nul */ + p = kzalloc(sizeof(*p) + symname_utf16_len, GFP_KERNEL); + if (!p) { + rc = -ENOMEM; + goto out; + } + } else { + p = (struct reparse_nfs_data_buffer *)buf; + } + rc = 
nfs_set_reparse_buf(p, mode, dev, symname_utf16, symname_utf16_len, &iov); if (rc) - return rc; + goto out; data = (struct cifs_open_info_data) { .reparse_point = true, - .reparse = { .tag = IO_REPARSE_TAG_NFS, .posix = p, }, + .reparse = { .tag = IO_REPARSE_TAG_NFS, .buf = (struct reparse_data_buffer *)p, }, + .symlink_target = kstrdup(symname, GFP_KERNEL), }; new = smb2_get_reparse_inode(&data, inode->i_sb, xid, @@ -304,12 +497,25 @@ static int mknod_nfs(unsigned int xid, struct inode *inode, else rc = PTR_ERR(new); cifs_free_open_info(&data); +out: + if (S_ISLNK(mode)) { + kfree(symname_utf16); + kfree(p); + } return rc; } -static int wsl_set_reparse_buf(struct reparse_data_buffer *buf, - mode_t mode, struct kvec *iov) +static int wsl_set_reparse_buf(struct reparse_data_buffer **buf, + mode_t mode, const char *symname, + struct cifs_sb_info *cifs_sb, + struct kvec *iov) { + struct reparse_wsl_symlink_data_buffer *symlink_buf; + __le16 *symname_utf16; + int symname_utf16_len; + int symname_utf8_maxlen; + int symname_utf8_len; + size_t buf_len; u32 tag; switch ((tag = reparse_mode_wsl_tag(mode))) { @@ -317,16 +523,45 @@ static int wsl_set_reparse_buf(struct reparse_data_buffer *buf, case IO_REPARSE_TAG_LX_CHR: case IO_REPARSE_TAG_LX_FIFO: case IO_REPARSE_TAG_AF_UNIX: + buf_len = sizeof(struct reparse_data_buffer); + *buf = kzalloc(buf_len, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + break; + case IO_REPARSE_TAG_LX_SYMLINK: + symname_utf16 = cifs_strndup_to_utf16(symname, strlen(symname), + &symname_utf16_len, + cifs_sb->local_nls, + NO_MAP_UNI_RSVD); + if (!symname_utf16) + return -ENOMEM; + symname_utf8_maxlen = symname_utf16_len/2*3; + symlink_buf = kzalloc(sizeof(struct reparse_wsl_symlink_data_buffer) + + symname_utf8_maxlen, GFP_KERNEL); + if (!symlink_buf) { + kfree(symname_utf16); + return -ENOMEM; + } + /* Flag 0x02000000 is unknown, but all wsl symlinks have this value */ + symlink_buf->Flags = cpu_to_le32(0x02000000); + /* PathBuffer is in UTF-8 but without trailing null-term byte */ + symname_utf8_len = utf16s_to_utf8s((wchar_t *)symname_utf16, symname_utf16_len/2, + UTF16_LITTLE_ENDIAN, + symlink_buf->PathBuffer, + symname_utf8_maxlen); + *buf = (struct reparse_data_buffer *)symlink_buf; + buf_len = sizeof(struct reparse_wsl_symlink_data_buffer) + symname_utf8_len; + kfree(symname_utf16); break; default: return -EOPNOTSUPP; } - buf->ReparseTag = cpu_to_le32(tag); - buf->Reserved = 0; - buf->ReparseDataLength = 0; - iov->iov_base = buf; - iov->iov_len = sizeof(*buf); + (*buf)->ReparseTag = cpu_to_le32(tag); + (*buf)->Reserved = 0; + (*buf)->ReparseDataLength = cpu_to_le16(buf_len - sizeof(struct reparse_data_buffer)); + iov->iov_base = *buf; + iov->iov_len = buf_len; return 0; } @@ -415,27 +650,32 @@ static int wsl_set_xattrs(struct inode *inode, umode_t _mode, static int mknod_wsl(unsigned int xid, struct inode *inode, struct dentry *dentry, struct cifs_tcon *tcon, - const char *full_path, umode_t mode, dev_t dev) + const char *full_path, umode_t mode, dev_t dev, + const char *symname) { + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_open_info_data data; - struct reparse_data_buffer buf; + struct reparse_data_buffer *buf; struct smb2_create_ea_ctx *cc; struct inode *new; unsigned int len; struct kvec reparse_iov, xattr_iov; int rc; - rc = wsl_set_reparse_buf(&buf, mode, &reparse_iov); + rc = wsl_set_reparse_buf(&buf, mode, symname, cifs_sb, &reparse_iov); if (rc) return rc; rc = wsl_set_xattrs(inode, mode, dev, &xattr_iov); - if (rc) + if (rc) { + 
kfree(buf); return rc; + } data = (struct cifs_open_info_data) { .reparse_point = true, - .reparse = { .tag = le32_to_cpu(buf.ReparseTag), .buf = &buf, }, + .reparse = { .tag = le32_to_cpu(buf->ReparseTag), .buf = buf, }, + .symlink_target = kstrdup(symname, GFP_KERNEL), }; cc = xattr_iov.iov_base; @@ -452,6 +692,7 @@ static int mknod_wsl(unsigned int xid, struct inode *inode, rc = PTR_ERR(new); cifs_free_open_info(&data); kfree(xattr_iov.iov_base); + kfree(buf); return rc; } @@ -460,21 +701,22 @@ int smb2_mknod_reparse(unsigned int xid, struct inode *inode, const char *full_path, umode_t mode, dev_t dev) { struct smb3_fs_context *ctx = CIFS_SB(inode->i_sb)->ctx; - int rc = -EOPNOTSUPP; + + if (S_ISSOCK(mode) && !ctx->nonativesocket && ctx->reparse_type != CIFS_REPARSE_TYPE_NONE) + return create_native_socket(xid, inode, dentry, tcon, full_path); switch (ctx->reparse_type) { case CIFS_REPARSE_TYPE_NFS: - rc = mknod_nfs(xid, inode, dentry, tcon, full_path, mode, dev); - break; + return mknod_nfs(xid, inode, dentry, tcon, full_path, mode, dev, NULL); case CIFS_REPARSE_TYPE_WSL: - rc = mknod_wsl(xid, inode, dentry, tcon, full_path, mode, dev); - break; + return mknod_wsl(xid, inode, dentry, tcon, full_path, mode, dev, NULL); + default: + return -EOPNOTSUPP; } - return rc; } /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */ -static int parse_reparse_posix(struct reparse_posix_data *buf, +static int parse_reparse_nfs(struct reparse_nfs_data_buffer *buf, struct cifs_sb_info *cifs_sb, struct cifs_open_info_data *data) { @@ -536,43 +778,160 @@ static int parse_reparse_posix(struct reparse_posix_data *buf, } int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len, - bool unicode, bool relative, + bool relative, const char *full_path, struct cifs_sb_info *cifs_sb) { char sep = CIFS_DIR_SEP(cifs_sb); char *linux_target = NULL; char *smb_target = NULL; + int symlinkroot_len; + int abs_path_len; + char *abs_path; int levels; int rc; int i; - /* Check that length it valid for unicode/non-unicode mode */ - if (!len || (unicode && (len % 2))) { + /* Check that length it valid */ + if (!len || (len % 2)) { cifs_dbg(VFS, "srv returned malformed symlink buffer\n"); rc = -EIO; goto out; } /* - * Check that buffer does not contain UTF-16 null codepoint in unicode - * mode or null byte in non-unicode mode because Linux cannot process - * symlink with null byte. + * Check that buffer does not contain UTF-16 null codepoint + * because Linux cannot process symlink with null byte. */ - if ((unicode && UniStrnlen((wchar_t *)buf, len/2) != len/2) || - (!unicode && strnlen(buf, len) != len)) { + if (UniStrnlen((wchar_t *)buf, len/2) != len/2) { cifs_dbg(VFS, "srv returned null byte in native symlink target location\n"); rc = -EIO; goto out; } - smb_target = cifs_strndup_from_utf16(buf, len, unicode, cifs_sb->local_nls); + smb_target = cifs_strndup_from_utf16(buf, len, true, cifs_sb->local_nls); if (!smb_target) { rc = -ENOMEM; goto out; } - if (smb_target[0] == sep && relative) { + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && !relative) { + /* + * This is an absolute symlink from the server which does not + * support POSIX paths, so the symlink is in NT-style path. + * So convert it to absolute Linux symlink target path. Root of + * the NT-style path for symlinks is specified in "symlinkroot" + * mount option. 
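A minimal sketch of the inverse mapping used when parsing such a target, assuming symlinkroot="/mnt" and handling only the plain \??\ prefix (the \DosDevices\ and \GLOBAL??\ aliases discussed below are omitted), with plain char strings instead of the UTF-16 on-wire form:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Convert an absolute NT symlink target to a Linux path under @root:
 * "\??\C:\foo\bar" -> "/mnt/c/foo/bar".
 */
static char *nt_to_linux(const char *root, const char *nt)
{
	char *out, *p;

	if (strncmp(nt, "\\??\\", 4) != 0)
		return NULL;			/* not a DOS/Win32 style target */
	nt += 4;
	if (*nt == '\\')			/* tolerate a doubled separator */
		nt++;
	if (!isalpha((unsigned char)nt[0]) || nt[1] != ':' ||
	    (nt[2] != '\\' && nt[2] != '\0'))
		return NULL;			/* not a drive-letter path */

	out = malloc(strlen(root) + strlen(nt) + 2);
	if (!out)
		return NULL;
	sprintf(out, "%s/%c%s", root, tolower((unsigned char)nt[0]), nt + 2);
	for (p = out; *p; p++)
		if (*p == '\\')
			*p = '/';
	return out;
}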
+ * + * Root of the DOS and Win32 paths is at NT path \??\ + * It means that DOS/Win32 path C:\folder\file.txt is + * NT path \??\C:\folder\file.txt + * + * NT systems have some well-known object symlinks in their NT + * hierarchy, which is needed to take into account when resolving + * other symlinks. Most commonly used symlink paths are: + * \?? -> \GLOBAL?? + * \DosDevices -> \?? + * \GLOBAL??\GLOBALROOT -> \ + * \GLOBAL??\Global -> \GLOBAL?? + * \GLOBAL??\NUL -> \Device\Null + * \GLOBAL??\UNC -> \Device\Mup + * \GLOBAL??\PhysicalDrive0 -> \Device\Harddisk0\DR0 (for each harddisk) + * \GLOBAL??\A: -> \Device\Floppy0 (if A: is the first floppy) + * \GLOBAL??\C: -> \Device\HarddiskVolume1 (if C: is the first harddisk) + * \GLOBAL??\D: -> \Device\CdRom0 (if D: is first cdrom) + * \SystemRoot -> \Device\Harddisk0\Partition1\WINDOWS (or where is NT system installed) + * \Volume{...} -> \Device\HarddiskVolume1 (where ... is system generated guid) + * + * In most common cases, absolute NT symlinks points to path on + * DOS/Win32 drive letter, system-specific Volume or on UNC share. + * Here are few examples of commonly used absolute NT symlinks + * created by mklink.exe tool: + * \??\C:\folder\file.txt + * \??\\C:\folder\file.txt + * \??\UNC\server\share\file.txt + * \??\\UNC\server\share\file.txt + * \??\Volume{b75e2c83-0000-0000-0000-602f00000000}\folder\file.txt + * + * It means that the most common path prefix \??\ is also NT path + * symlink (to \GLOBAL??). It is less common that second path + * separator is double backslash, but it is valid. + * + * Volume guid is randomly generated by the target system and so + * only the target system knows the mapping between guid and the + * hardisk number. Over SMB it is not possible to resolve this + * mapping, therefore symlinks pointing to target location of + * volume guids are totally unusable over SMB. + * + * For now parse only symlink paths available for DOS and Win32. + * Those are paths with \??\ prefix or paths which points to \??\ + * via other NT symlink (\DosDevices\, \GLOBAL??\, ...). + */ + abs_path = smb_target; +globalroot: + if (strstarts(abs_path, "\\??\\")) + abs_path += sizeof("\\??\\")-1; + else if (strstarts(abs_path, "\\DosDevices\\")) + abs_path += sizeof("\\DosDevices\\")-1; + else if (strstarts(abs_path, "\\GLOBAL??\\")) + abs_path += sizeof("\\GLOBAL??\\")-1; + else { + /* Unhandled absolute symlink, points outside of DOS/Win32 */ + cifs_dbg(VFS, + "absolute symlink '%s' cannot be converted from NT format " + "because points to unknown target\n", + smb_target); + rc = -EIO; + goto out; + } + + /* Sometimes path separator after \?? is double backslash */ + if (abs_path[0] == '\\') + abs_path++; + + while (strstarts(abs_path, "Global\\")) + abs_path += sizeof("Global\\")-1; + + if (strstarts(abs_path, "GLOBALROOT\\")) { + /* Label globalroot requires path with leading '\\', so do not trim '\\' */ + abs_path += sizeof("GLOBALROOT")-1; + goto globalroot; + } + + /* For now parse only paths to drive letters */ + if (((abs_path[0] >= 'A' && abs_path[0] <= 'Z') || + (abs_path[0] >= 'a' && abs_path[0] <= 'z')) && + abs_path[1] == ':' && + (abs_path[2] == '\\' || abs_path[2] == '\0')) { + /* Convert drive letter to lowercase and drop colon */ + char drive_letter = abs_path[0]; + if (drive_letter >= 'A' && drive_letter <= 'Z') + drive_letter += 'a'-'A'; + abs_path++; + abs_path[0] = drive_letter; + } else { + /* Unhandled absolute symlink. Report an error. 
*/ + cifs_dbg(VFS, + "absolute symlink '%s' cannot be converted from NT format " + "because points to unknown target\n", + smb_target); + rc = -EIO; + goto out; + } + + abs_path_len = strlen(abs_path)+1; + symlinkroot_len = strlen(cifs_sb->ctx->symlinkroot); + if (cifs_sb->ctx->symlinkroot[symlinkroot_len-1] == '/') + symlinkroot_len--; + linux_target = kmalloc(symlinkroot_len + 1 + abs_path_len, GFP_KERNEL); + if (!linux_target) { + rc = -ENOMEM; + goto out; + } + memcpy(linux_target, cifs_sb->ctx->symlinkroot, symlinkroot_len); + linux_target[symlinkroot_len] = '/'; + memcpy(linux_target + symlinkroot_len + 1, abs_path, abs_path_len); + } else if (smb_target[0] == sep && relative) { /* * This is a relative SMB symlink from the top of the share, * which is the top level directory of the Linux mount point. @@ -601,6 +960,12 @@ int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len, } memcpy(linux_target + levels*3, smb_target+1, smb_target_len); /* +1 to skip leading sep */ } else { + /* + * This is either an absolute symlink in POSIX-style format + * or relative SMB symlink from the current directory. + * These paths have same format as Linux symlinks, so no + * conversion is needed. + */ linux_target = smb_target; smb_target = NULL; } @@ -620,8 +985,8 @@ out: return rc; } -static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym, - u32 plen, bool unicode, +static int parse_reparse_native_symlink(struct reparse_symlink_data_buffer *sym, + u32 plen, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data) @@ -641,7 +1006,6 @@ static int parse_reparse_symlink(struct reparse_symlink_data_buffer *sym, return smb2_parse_native_symlink(&data->symlink_target, sym->PathBuffer + offs, len, - unicode, le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE, full_path, cifs_sb); @@ -696,7 +1060,7 @@ static int parse_reparse_wsl_symlink(struct reparse_wsl_symlink_data_buffer *buf int parse_reparse_point(struct reparse_data_buffer *buf, u32 plen, struct cifs_sb_info *cifs_sb, const char *full_path, - bool unicode, struct cifs_open_info_data *data) + struct cifs_open_info_data *data) { struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); @@ -705,12 +1069,12 @@ int parse_reparse_point(struct reparse_data_buffer *buf, /* See MS-FSCC 2.1.2 */ switch (le32_to_cpu(buf->ReparseTag)) { case IO_REPARSE_TAG_NFS: - return parse_reparse_posix((struct reparse_posix_data *)buf, + return parse_reparse_nfs((struct reparse_nfs_data_buffer *)buf, cifs_sb, data); case IO_REPARSE_TAG_SYMLINK: - return parse_reparse_symlink( + return parse_reparse_native_symlink( (struct reparse_symlink_data_buffer *)buf, - plen, unicode, cifs_sb, full_path, data); + plen, cifs_sb, full_path, data); case IO_REPARSE_TAG_LX_SYMLINK: return parse_reparse_wsl_symlink( (struct reparse_wsl_symlink_data_buffer *)buf, @@ -744,14 +1108,15 @@ int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb, buf = (struct reparse_data_buffer *)((u8 *)io + le32_to_cpu(io->OutputOffset)); - return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data); + return parse_reparse_point(buf, plen, cifs_sb, full_path, data); } -static void wsl_to_fattr(struct cifs_open_info_data *data, +static bool wsl_to_fattr(struct cifs_open_info_data *data, struct cifs_sb_info *cifs_sb, u32 tag, struct cifs_fattr *fattr) { struct smb2_file_full_ea_info *ea; + bool have_xattr_dev = false; u32 next = 0; switch (tag) { @@ -794,21 +1159,31 @@ static void wsl_to_fattr(struct cifs_open_info_data *data, 
fattr->cf_uid = wsl_make_kuid(cifs_sb, v); else if (!strncmp(name, SMB2_WSL_XATTR_GID, nlen)) fattr->cf_gid = wsl_make_kgid(cifs_sb, v); - else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen)) + else if (!strncmp(name, SMB2_WSL_XATTR_MODE, nlen)) { + /* File type in reparse point tag and in xattr mode must match. */ + if (S_DT(fattr->cf_mode) != S_DT(le32_to_cpu(*(__le32 *)v))) + return false; fattr->cf_mode = (umode_t)le32_to_cpu(*(__le32 *)v); - else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen)) + } else if (!strncmp(name, SMB2_WSL_XATTR_DEV, nlen)) { fattr->cf_rdev = reparse_mkdev(v); + have_xattr_dev = true; + } } while (next); out: + + /* Major and minor numbers for char and block devices are mandatory. */ + if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK)) + return false; + fattr->cf_dtype = S_DT(fattr->cf_mode); + return true; } static bool posix_reparse_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, struct cifs_open_info_data *data) { - struct reparse_posix_data *buf = data->reparse.posix; - + struct reparse_nfs_data_buffer *buf = (struct reparse_nfs_data_buffer *)data->reparse.buf; if (buf == NULL) return true; @@ -874,7 +1249,9 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb, case IO_REPARSE_TAG_AF_UNIX: case IO_REPARSE_TAG_LX_CHR: case IO_REPARSE_TAG_LX_BLK: - wsl_to_fattr(data, cifs_sb, tag, fattr); + ok = wsl_to_fattr(data, cifs_sb, tag, fattr); + if (!ok) + return false; break; case IO_REPARSE_TAG_NFS: ok = posix_reparse_to_fattr(cifs_sb, fattr, data); diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h index ff05b0e75c92..5a753fec7e2c 100644 --- a/fs/smb/client/reparse.h +++ b/fs/smb/client/reparse.h @@ -50,6 +50,7 @@ static inline kgid_t wsl_make_kgid(struct cifs_sb_info *cifs_sb, static inline u64 reparse_mode_nfs_type(mode_t mode) { switch (mode & S_IFMT) { + case S_IFLNK: return NFS_SPECFILE_LNK; case S_IFBLK: return NFS_SPECFILE_BLK; case S_IFCHR: return NFS_SPECFILE_CHR; case S_IFIFO: return NFS_SPECFILE_FIFO; @@ -61,6 +62,7 @@ static inline u64 reparse_mode_nfs_type(mode_t mode) static inline u32 reparse_mode_wsl_tag(mode_t mode) { switch (mode & S_IFMT) { + case S_IFLNK: return IO_REPARSE_TAG_LX_SYMLINK; case S_IFBLK: return IO_REPARSE_TAG_LX_BLK; case S_IFCHR: return IO_REPARSE_TAG_LX_CHR; case S_IFIFO: return IO_REPARSE_TAG_LX_FIFO; diff --git a/fs/smb/client/rfc1002pdu.h b/fs/smb/client/rfc1002pdu.h index ae1d025da294..ac82c2f3a4a2 100644 --- a/fs/smb/client/rfc1002pdu.h +++ b/fs/smb/client/rfc1002pdu.h @@ -24,7 +24,7 @@ struct rfc1002_session_packet { __u8 type; __u8 flags; - __u16 length; + __be16 length; union { struct { __u8 called_len; @@ -35,8 +35,8 @@ struct rfc1002_session_packet { __u8 scope2; /* null */ } __attribute__((packed)) session_req; struct { - __u32 retarget_ip_addr; - __u16 port; + __be32 retarget_ip_addr; + __be16 port; } __attribute__((packed)) retarget_resp; __u8 neg_ses_resp_error_code; /* POSITIVE_SESSION_RESPONSE packet does not include trailer. 
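The length field in the RFC 1002 session packet header is carried in network byte order, which is what the __be16 annotation above records. A minimal userspace sketch of reading it (ignoring the length-extension bit in the flags byte):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Bytes 0 and 1 of the 4-byte session packet header are type and flags;
 * bytes 2-3 are the big-endian payload length.
 */
static uint32_t rfc1002_payload_length(const unsigned char hdr[4])
{
	uint16_t len_be;

	memcpy(&len_be, hdr + 2, sizeof(len_be));
	return ntohs(len_be);			/* convert to host byte order */
}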
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c index 91d4d409cb1d..faa80e7d54a6 100644 --- a/fs/smb/client/sess.c +++ b/fs/smb/client/sess.c @@ -1235,12 +1235,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) switch (requested) { case Kerberos: case RawNTLMSSP: + case IAKerb: return requested; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index db3695eddcf9..d6e2fb669c40 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -377,7 +377,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) static void cifs_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { cifs_set_oplock_level(cinode, oplock); } @@ -551,7 +551,7 @@ static int cifs_query_path_info(const unsigned int xid, int rc; FILE_ALL_INFO fi = {}; - data->symlink = false; + data->reparse_point = false; data->adjust_tz = false; /* could do find first instead but this returns more info */ @@ -569,32 +569,8 @@ static int cifs_query_path_info(const unsigned int xid, } if (!rc) { - int tmprc; - int oplock = 0; - struct cifs_fid fid; - struct cifs_open_parms oparms; - move_cifs_info_to_smb2(&data->fi, &fi); - - if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) - return 0; - - oparms = (struct cifs_open_parms) { - .tcon = tcon, - .cifs_sb = cifs_sb, - .desired_access = FILE_READ_ATTRIBUTES, - .create_options = cifs_create_options(cifs_sb, 0), - .disposition = FILE_OPEN, - .path = full_path, - .fid = &fid, - }; - - /* Need to check if this is a symbolic link or not */ - tmprc = CIFS_open(xid, &oparms, &oplock, NULL); - if (tmprc == -EOPNOTSUPP) - data->symlink = true; - else if (tmprc == 0) - CIFSSMBClose(xid, tcon, fid.netfid); + data->reparse_point = le32_to_cpu(fi.Attributes) & ATTR_REPARSE; } return rc; @@ -614,7 +590,13 @@ static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon, * There may be higher info levels that work but are there Windows * server or network appliances for which IndexNumber field is not * guaranteed unique? + * + * CIFSGetSrvInodeNumber() uses SMB_QUERY_FILE_INTERNAL_INFO + * which is SMB PASSTHROUGH level therefore check for capability. + * Note that this function can be called with tcon == NULL. 
*/ + if (tcon && !(tcon->ses->capabilities & CAP_INFOLEVEL_PASSTHRU)) + return -EOPNOTSUPP; return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid, cifs_sb->local_nls, cifs_remap(cifs_sb)); @@ -1004,7 +986,7 @@ static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb, buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol + le32_to_cpu(io->DataOffset)); - return parse_reparse_point(buf, plen, cifs_sb, full_path, true, data); + return parse_reparse_point(buf, plen, cifs_sb, full_path, data); } static bool diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index e836bc2193dd..d609a20fb98a 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -42,14 +42,14 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov) end = (struct smb2_error_context_rsp *)((u8 *)err + iov->iov_len); do { if (le32_to_cpu(p->ErrorId) == SMB2_ERROR_ID_DEFAULT) { - sym = (struct smb2_symlink_err_rsp *)&p->ErrorContextData; + sym = (struct smb2_symlink_err_rsp *)p->ErrorContextData; break; } cifs_dbg(FYI, "%s: skipping unhandled error context: 0x%x\n", __func__, le32_to_cpu(p->ErrorId)); len = ALIGN(le32_to_cpu(p->ErrorDataLength), 8); - p = (struct smb2_error_context_rsp *)((u8 *)&p->ErrorContextData + len); + p = (struct smb2_error_context_rsp *)(p->ErrorContextData + len); } while (p < end); } else if (le32_to_cpu(err->ByteCount) >= sizeof(*sym) && iov->iov_len >= SMB2_SYMLINK_STRUCT_SIZE) { @@ -63,6 +63,52 @@ static struct smb2_symlink_err_rsp *symlink_data(const struct kvec *iov) return sym; } +int smb2_fix_symlink_target_type(char **target, bool directory, struct cifs_sb_info *cifs_sb) +{ + char *buf; + int len; + + /* + * POSIX server does not distinguish between symlinks to file and + * symlink directory. So nothing is needed to fix on the client side. + */ + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) + return 0; + + if (!*target) + return -EIO; + + len = strlen(*target); + if (!len) + return -EIO; + + /* + * If this is directory symlink and it does not have trailing slash then + * append it. Trailing slash simulates Windows/SMB behavior which do not + * allow resolving directory symlink to file. + */ + if (directory && (*target)[len-1] != '/') { + buf = krealloc(*target, len+2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + buf[len] = '/'; + buf[len+1] = '\0'; + *target = buf; + len++; + } + + /* + * If this is a file (non-directory) symlink and it points to path name + * with trailing slash then this is an invalid symlink because file name + * cannot contain slash character. File name with slash is invalid on + * both Windows and Linux systems. So return an error for such symlink. 
+ */ + if (!directory && (*target)[len-1] == '/') + return -EIO; + + return 0; +} + int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec *iov, const char *full_path, char **path) { @@ -89,7 +135,6 @@ int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, const struct kvec return smb2_parse_native_symlink(path, (char *)sym->PathBuffer + sub_offs, sub_len, - true, le32_to_cpu(sym->Flags) & SYMLINK_FLAG_RELATIVE, full_path, cifs_sb); @@ -133,6 +178,11 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 NULL, NULL, NULL); oparms->create_options &= ~OPEN_REPARSE_POINT; } + if (!rc) { + bool directory = le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY; + rc = smb2_fix_symlink_target_type(&data->symlink_target, + directory, oparms->cifs_sb); + } } } diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c index a55f0044d30b..5dfb30b0a852 100644 --- a/fs/smb/client/smb2inode.c +++ b/fs/smb/client/smb2inode.c @@ -176,27 +176,27 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, struct kvec *out_iov, int *out_buftype, struct dentry *dentry) { - struct reparse_data_buffer *rbuf; + struct smb2_query_info_rsp *qi_rsp = NULL; struct smb2_compound_vars *vars = NULL; - struct kvec *rsp_iov, *iov; - struct smb_rqst *rqst; - int rc; - __le16 *utf16_path = NULL; __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; - struct cifs_fid fid; + struct cifs_open_info_data *idata; struct cifs_ses *ses = tcon->ses; + struct reparse_data_buffer *rbuf; struct TCP_Server_Info *server; - int num_rqst = 0, i; int resp_buftype[MAX_COMPOUND]; - struct smb2_query_info_rsp *qi_rsp = NULL; - struct cifs_open_info_data *idata; + int retries = 0, cur_sleep = 1; + __u8 delete_pending[8] = {1,}; + struct kvec *rsp_iov, *iov; struct inode *inode = NULL; - int flags = 0; - __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0}; + __le16 *utf16_path = NULL; + struct smb_rqst *rqst; unsigned int size[2]; - void *data[2]; + struct cifs_fid fid; + int num_rqst = 0, i; unsigned int len; - int retries = 0, cur_sleep = 1; + int tmp_rc, rc; + int flags = 0; + void *data[2]; replay_again: /* reinitialize for possible replay */ @@ -298,8 +298,8 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_query_info_compound_enter(xid, ses->Suid, - tcon->tid, full_path); + trace_smb3_query_info_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; case SMB2_OP_POSIX_QUERY_INFO: rqst[num_rqst].rq_iov = &vars->qi_iov; @@ -334,18 +334,18 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_posix_query_info_compound_enter(xid, ses->Suid, - tcon->tid, full_path); + trace_smb3_posix_query_info_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; case SMB2_OP_DELETE: - trace_smb3_delete_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_delete_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_MKDIR: /* * Directories are created through parameters in the * SMB2_open() call. 
*/ - trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_mkdir_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_RMDIR: rqst[num_rqst].rq_iov = &vars->si_iov[0]; @@ -363,7 +363,7 @@ replay_again: goto finished; smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst++]); - trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_rmdir_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_SET_EOF: rqst[num_rqst].rq_iov = &vars->si_iov[0]; @@ -398,7 +398,7 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_set_eof_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_SET_INFO: rqst[num_rqst].rq_iov = &vars->si_iov[0]; @@ -429,8 +429,8 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_set_info_compound_enter(xid, ses->Suid, - tcon->tid, full_path); + trace_smb3_set_info_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; case SMB2_OP_RENAME: rqst[num_rqst].rq_iov = &vars->si_iov[0]; @@ -469,7 +469,7 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_rename_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_HARDLINK: rqst[num_rqst].rq_iov = &vars->si_iov[0]; @@ -496,7 +496,7 @@ replay_again: goto finished; smb2_set_next_command(tcon, &rqst[num_rqst]); smb2_set_related(&rqst[num_rqst++]); - trace_smb3_hardlink_enter(xid, ses->Suid, tcon->tid, full_path); + trace_smb3_hardlink_enter(xid, tcon->tid, ses->Suid, full_path); break; case SMB2_OP_SET_REPARSE: rqst[num_rqst].rq_iov = vars->io_iov; @@ -523,8 +523,8 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_set_reparse_compound_enter(xid, ses->Suid, - tcon->tid, full_path); + trace_smb3_set_reparse_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; case SMB2_OP_GET_REPARSE: rqst[num_rqst].rq_iov = vars->io_iov; @@ -549,8 +549,8 @@ replay_again: goto finished; } num_rqst++; - trace_smb3_get_reparse_compound_enter(xid, ses->Suid, - tcon->tid, full_path); + trace_smb3_get_reparse_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; case SMB2_OP_QUERY_WSL_EA: rqst[num_rqst].rq_iov = &vars->ea_iov; @@ -584,6 +584,8 @@ replay_again: goto finished; } num_rqst++; + trace_smb3_query_wsl_ea_compound_enter(xid, tcon->tid, + ses->Suid, full_path); break; default: cifs_dbg(VFS, "Invalid command\n"); @@ -637,7 +639,14 @@ finished: tcon->need_reconnect = true; } + tmp_rc = rc; for (i = 0; i < num_cmds; i++) { + char *buf = rsp_iov[i + i].iov_base; + + if (buf && resp_buftype[i + 1] != CIFS_NO_BUFFER) + rc = server->ops->map_error(buf, false); + else + rc = tmp_rc; switch (cmds[i]) { case SMB2_OP_QUERY_INFO: idata = in_iov[i].iov_base; @@ -656,11 +665,11 @@ finished: } SMB2_query_info_free(&rqst[num_rqst++]); if (rc) - trace_smb3_query_info_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_query_info_compound_err(xid, tcon->tid, + ses->Suid, rc); else - trace_smb3_query_info_compound_done(xid, ses->Suid, - tcon->tid); + trace_smb3_query_info_compound_done(xid, tcon->tid, + ses->Suid); break; case SMB2_OP_POSIX_QUERY_INFO: idata = in_iov[i].iov_base; @@ -683,15 +692,15 @@ finished: SMB2_query_info_free(&rqst[num_rqst++]); if (rc) - trace_smb3_posix_query_info_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_posix_query_info_compound_err(xid, tcon->tid, + ses->Suid, rc); else - trace_smb3_posix_query_info_compound_done(xid, ses->Suid, - 
tcon->tid); + trace_smb3_posix_query_info_compound_done(xid, tcon->tid, + ses->Suid); break; case SMB2_OP_DELETE: if (rc) - trace_smb3_delete_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_delete_err(xid, tcon->tid, ses->Suid, rc); else { /* * If dentry (hence, inode) is NULL, lease break is going to @@ -699,59 +708,59 @@ finished: */ if (inode) cifs_mark_open_handles_for_deleted_file(inode, full_path); - trace_smb3_delete_done(xid, ses->Suid, tcon->tid); + trace_smb3_delete_done(xid, tcon->tid, ses->Suid); } break; case SMB2_OP_MKDIR: if (rc) - trace_smb3_mkdir_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_mkdir_err(xid, tcon->tid, ses->Suid, rc); else - trace_smb3_mkdir_done(xid, ses->Suid, tcon->tid); + trace_smb3_mkdir_done(xid, tcon->tid, ses->Suid); break; case SMB2_OP_HARDLINK: if (rc) - trace_smb3_hardlink_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_hardlink_err(xid, tcon->tid, ses->Suid, rc); else - trace_smb3_hardlink_done(xid, ses->Suid, tcon->tid); + trace_smb3_hardlink_done(xid, tcon->tid, ses->Suid); SMB2_set_info_free(&rqst[num_rqst++]); break; case SMB2_OP_RENAME: if (rc) - trace_smb3_rename_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_rename_err(xid, tcon->tid, ses->Suid, rc); else - trace_smb3_rename_done(xid, ses->Suid, tcon->tid); + trace_smb3_rename_done(xid, tcon->tid, ses->Suid); SMB2_set_info_free(&rqst[num_rqst++]); break; case SMB2_OP_RMDIR: if (rc) - trace_smb3_rmdir_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_rmdir_err(xid, tcon->tid, ses->Suid, rc); else - trace_smb3_rmdir_done(xid, ses->Suid, tcon->tid); + trace_smb3_rmdir_done(xid, tcon->tid, ses->Suid); SMB2_set_info_free(&rqst[num_rqst++]); break; case SMB2_OP_SET_EOF: if (rc) - trace_smb3_set_eof_err(xid, ses->Suid, tcon->tid, rc); + trace_smb3_set_eof_err(xid, tcon->tid, ses->Suid, rc); else - trace_smb3_set_eof_done(xid, ses->Suid, tcon->tid); + trace_smb3_set_eof_done(xid, tcon->tid, ses->Suid); SMB2_set_info_free(&rqst[num_rqst++]); break; case SMB2_OP_SET_INFO: if (rc) - trace_smb3_set_info_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_set_info_compound_err(xid, tcon->tid, + ses->Suid, rc); else - trace_smb3_set_info_compound_done(xid, ses->Suid, - tcon->tid); + trace_smb3_set_info_compound_done(xid, tcon->tid, + ses->Suid); SMB2_set_info_free(&rqst[num_rqst++]); break; case SMB2_OP_SET_REPARSE: if (rc) { - trace_smb3_set_reparse_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_set_reparse_compound_err(xid, tcon->tid, + ses->Suid, rc); } else { - trace_smb3_set_reparse_compound_done(xid, ses->Suid, - tcon->tid); + trace_smb3_set_reparse_compound_done(xid, tcon->tid, + ses->Suid); } SMB2_ioctl_free(&rqst[num_rqst++]); break; @@ -764,18 +773,18 @@ finished: rbuf = reparse_buf_ptr(iov); if (IS_ERR(rbuf)) { rc = PTR_ERR(rbuf); - trace_smb3_set_reparse_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_get_reparse_compound_err(xid, tcon->tid, + ses->Suid, rc); } else { idata->reparse.tag = le32_to_cpu(rbuf->ReparseTag); - trace_smb3_set_reparse_compound_done(xid, ses->Suid, - tcon->tid); + trace_smb3_get_reparse_compound_done(xid, tcon->tid, + ses->Suid); } memset(iov, 0, sizeof(*iov)); resp_buftype[i + 1] = CIFS_NO_BUFFER; } else { - trace_smb3_set_reparse_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_get_reparse_compound_err(xid, tcon->tid, + ses->Suid, rc); } SMB2_ioctl_free(&rqst[num_rqst++]); break; @@ -792,17 +801,18 @@ finished: } } if (!rc) { - trace_smb3_query_wsl_ea_compound_done(xid, ses->Suid, - tcon->tid); + 
trace_smb3_query_wsl_ea_compound_done(xid, tcon->tid, + ses->Suid); } else { - trace_smb3_query_wsl_ea_compound_err(xid, ses->Suid, - tcon->tid, rc); + trace_smb3_query_wsl_ea_compound_err(xid, tcon->tid, + ses->Suid, rc); } SMB2_query_info_free(&rqst[num_rqst++]); break; } } SMB2_close_free(&rqst[num_rqst]); + rc = tmp_rc; num_cmds += 2; if (out_iov && out_buftype) { @@ -858,22 +868,52 @@ static int parse_create_response(struct cifs_open_info_data *data, return rc; } +/* Check only if SMB2_OP_QUERY_WSL_EA command failed in the compound chain */ +static bool ea_unsupported(int *cmds, int num_cmds, + struct kvec *out_iov, int *out_buftype) +{ + int i; + + if (cmds[num_cmds - 1] != SMB2_OP_QUERY_WSL_EA) + return false; + + for (i = 1; i < num_cmds - 1; i++) { + struct smb2_hdr *hdr = out_iov[i].iov_base; + + if (out_buftype[i] == CIFS_NO_BUFFER || !hdr || + hdr->Status != STATUS_SUCCESS) + return false; + } + return true; +} + +static inline void free_rsp_iov(struct kvec *iovs, int *buftype, int count) +{ + int i; + + for (i = 0; i < count; i++) { + free_rsp_buf(buftype[i], iovs[i].iov_base); + memset(&iovs[i], 0, sizeof(*iovs)); + buftype[i] = CIFS_NO_BUFFER; + } +} + int smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, struct cifs_open_info_data *data) { + struct kvec in_iov[3], out_iov[5] = {}; + struct cached_fid *cfid = NULL; struct cifs_open_parms oparms; - __u32 create_options = 0; struct cifsFileInfo *cfile; - struct cached_fid *cfid = NULL; + __u32 create_options = 0; + int out_buftype[5] = {}; struct smb2_hdr *hdr; - struct kvec in_iov[3], out_iov[3] = {}; - int out_buftype[3] = {}; + int num_cmds = 0; int cmds[3]; bool islink; - int i, num_cmds = 0; int rc, rc2; data->adjust_tz = false; @@ -943,14 +983,14 @@ int smb2_query_path_info(const unsigned int xid, if (rc || !data->reparse_point) goto out; - if (!tcon->posix_extensions) - cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA; /* * Skip SMB2_OP_GET_REPARSE if symlink already parsed in create * response. 
*/ if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK) cmds[num_cmds++] = SMB2_OP_GET_REPARSE; + if (!tcon->posix_extensions) + cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA; oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES | @@ -958,9 +998,23 @@ int smb2_query_path_info(const unsigned int xid, FILE_OPEN, create_options | OPEN_REPARSE_POINT, ACL_NO_MODE); cifs_get_readable_path(tcon, full_path, &cfile); + free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov)); rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, in_iov, cmds, num_cmds, - cfile, NULL, NULL, NULL); + cfile, out_iov, out_buftype, NULL); + if (rc && ea_unsupported(cmds, num_cmds, + out_iov, out_buftype)) { + if (data->reparse.tag != IO_REPARSE_TAG_LX_BLK && + data->reparse.tag != IO_REPARSE_TAG_LX_CHR) + rc = 0; + else + rc = -EOPNOTSUPP; + } + + if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) { + bool directory = le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY; + rc = smb2_fix_symlink_target_type(&data->symlink_target, directory, cifs_sb); + } break; case -EREMOTE: break; @@ -978,8 +1032,7 @@ int smb2_query_path_info(const unsigned int xid, } out: - for (i = 0; i < ARRAY_SIZE(out_buftype); i++) - free_rsp_buf(out_buftype[i], out_iov[i].iov_base); + free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov)); return rc; } diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c index b05313acf9b2..12c2b868789f 100644 --- a/fs/smb/client/smb2maperror.c +++ b/fs/smb/client/smb2maperror.c @@ -380,7 +380,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_NO_LOGON_SERVERS, -EIO, "STATUS_NO_LOGON_SERVERS"}, {STATUS_NO_SUCH_LOGON_SESSION, -EIO, "STATUS_NO_SUCH_LOGON_SESSION"}, {STATUS_NO_SUCH_PRIVILEGE, -EIO, "STATUS_NO_SUCH_PRIVILEGE"}, - {STATUS_PRIVILEGE_NOT_HELD, -EIO, "STATUS_PRIVILEGE_NOT_HELD"}, + {STATUS_PRIVILEGE_NOT_HELD, -EPERM, "STATUS_PRIVILEGE_NOT_HELD"}, {STATUS_INVALID_ACCOUNT_NAME, -EIO, "STATUS_INVALID_ACCOUNT_NAME"}, {STATUS_USER_EXISTS, -EIO, "STATUS_USER_EXISTS"}, {STATUS_NO_SUCH_USER, -EIO, "STATUS_NO_SUCH_USER"}, @@ -871,7 +871,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { {STATUS_VALIDATE_CONTINUE, -EIO, "STATUS_VALIDATE_CONTINUE"}, {STATUS_NO_MATCH, -EIO, "STATUS_NO_MATCH"}, {STATUS_NO_MORE_MATCHES, -EIO, "STATUS_NO_MORE_MATCHES"}, - {STATUS_NOT_A_REPARSE_POINT, -EIO, "STATUS_NOT_A_REPARSE_POINT"}, + {STATUS_NOT_A_REPARSE_POINT, -ENODATA, "STATUS_NOT_A_REPARSE_POINT"}, {STATUS_IO_REPARSE_TAG_INVALID, -EIO, "STATUS_IO_REPARSE_TAG_INVALID"}, {STATUS_IO_REPARSE_TAG_MISMATCH, -EIO, "STATUS_IO_REPARSE_TAG_MISMATCH"}, diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 87cb1872db28..ec36bed54b0b 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -658,7 +658,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, while (bytes_left >= (ssize_t)sizeof(*p)) { memset(&tmp_iface, 0, sizeof(tmp_iface)); - tmp_iface.speed = le64_to_cpu(p->LinkSpeed); + /* default to 1Gbps when link speed is unset */ + tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000; tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 
1 : 0; @@ -3007,9 +3008,9 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, num_of_nodes, target_nodes, nls_codepage, remap, search_name, true /* is_unicode */); - if (rc) { - cifs_tcon_dbg(VFS, "parse error in %s rc=%d\n", __func__, rc); - goto out; + if (rc && rc != -ENOENT) { + cifs_tcon_dbg(VFS, "%s: failed to parse DFS referral %s: %d\n", + __func__, search_name, rc); } out: @@ -3903,22 +3904,22 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, static void smb2_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache); + __u16 epoch, bool *purge_cache); static void smb3_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { unsigned int old_state = cinode->oplock; - unsigned int old_epoch = cinode->epoch; + __u16 old_epoch = cinode->epoch; unsigned int new_state; if (epoch > old_epoch) { @@ -3938,7 +3939,7 @@ smb3_downgrade_oplock(struct TCP_Server_Info *server, static void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { oplock &= 0xFF; cinode->lease_granted = false; @@ -3962,7 +3963,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { char message[5] = {0}; unsigned int new_oplock = 0; @@ -3999,7 +4000,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, static void smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { unsigned int old_oplock = cinode->oplock; @@ -4113,7 +4114,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock) } static __u8 -smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) +smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) { struct create_lease *lc = (struct create_lease *)buf; @@ -4124,7 +4125,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) } static __u8 -smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) +smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) { struct create_lease_v2 *lc = (struct create_lease_v2 *)buf; @@ -4388,7 +4389,7 @@ static struct folio_queue *cifs_alloc_folioq_buffer(ssize_t size) p = kmalloc(sizeof(*p), GFP_NOFS); if (!p) goto nomem; - folioq_init(p); + folioq_init(p, 0); if (tail) { tail->next = p; p->prev = tail; @@ -5076,6 +5077,7 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, { struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; + struct cifs_open_info_data idata; struct cifs_io_parms io_parms = {}; struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_fid fid; @@ -5145,10 +5147,20 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, CREATE_OPTION_SPECIAL, ACL_NO_MODE); oparms.fid = &fid; - rc = server->ops->open(xid, &oparms, &oplock, NULL); + rc = server->ops->open(xid, &oparms, &oplock, &idata); if (rc) goto out; + /* + * Check if the server honored 
ATTR_SYSTEM flag by CREATE_OPTION_SPECIAL + * option. If not then server does not support ATTR_SYSTEM and newly + * created file is not SFU compatible, which means that the call failed. + */ + if (!(le32_to_cpu(idata.fi.Attributes) & ATTR_SYSTEM)) { + rc = -EOPNOTSUPP; + goto out_close; + } + if (type_len + data_len > 0) { io_parms.pid = current->tgid; io_parms.tcon = tcon; @@ -5163,8 +5175,18 @@ int __cifs_sfu_make_node(unsigned int xid, struct inode *inode, iov, ARRAY_SIZE(iov)-1); } +out_close: server->ops->close(xid, tcon, &fid); + /* + * If CREATE was successful but either setting ATTR_SYSTEM failed or + * writing type/data information failed then remove the intermediate + * object created by CREATE. Otherwise intermediate empty object stay + * on the server. + */ + if (rc) + server->ops->unlink(xid, tcon, full_path, cifs_sb, NULL); + out: kfree(symname_utf16); return rc; diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 0577556f0a41..ed7812247ebc 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -1429,7 +1429,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; @@ -2169,7 +2169,7 @@ tcon_exit: tcon_error_exit: if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) - cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); + cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree); goto tcon_exit; } @@ -2329,7 +2329,7 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, int smb2_parse_contexts(struct TCP_Server_Info *server, struct kvec *rsp_iov, - unsigned int *epoch, + __u16 *epoch, char *lease_key, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix) @@ -4500,14 +4500,6 @@ smb2_new_read_req(void **buf, unsigned int *total_len, return rc; } -static void smb2_readv_worker(struct work_struct *work) -{ - struct cifs_io_subrequest *rdata = - container_of(work, struct cifs_io_subrequest, subreq.work); - - netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false); -} - static void smb2_readv_callback(struct mid_q_entry *mid) { @@ -4615,16 +4607,17 @@ smb2_readv_callback(struct mid_q_entry *mid) __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags); rdata->result = 0; } - __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); + if (rdata->got_bytes) + __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); } trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, server->credits, server->in_flight, 0, cifs_trace_rw_credits_read_response_clear); rdata->credits.value = 0; + rdata->subreq.error = rdata->result; rdata->subreq.transferred += rdata->got_bytes; trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress); - INIT_WORK(&rdata->subreq.work, smb2_readv_worker); - queue_work(cifsiod_wq, &rdata->subreq.work); + netfs_read_subreq_terminated(&rdata->subreq); release_mid(mid); trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, server->credits, server->in_flight, diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h index 076d9e83e1a0..3c09a58dfd07 100644 --- a/fs/smb/client/smb2pdu.h +++ b/fs/smb/client/smb2pdu.h @@ -79,7 +79,7 @@ struct smb2_symlink_err_rsp { struct smb2_error_context_rsp { __le32 ErrorDataLength; __le32 ErrorId; - 
__u8 ErrorContextData; /* ErrorDataLength long array */ + __u8 ErrorContextData[] __counted_by_le(ErrorDataLength); } __packed; /* ErrorId values */ diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h index 09349fa8da03..4662c7e2d259 100644 --- a/fs/smb/client/smb2proto.h +++ b/fs/smb/client/smb2proto.h @@ -111,8 +111,9 @@ extern int smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_read); +int smb2_fix_symlink_target_type(char **target, bool directory, struct cifs_sb_info *cifs_sb); int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len, - bool unicode, bool relative, + bool relative, const char *full_path, struct cifs_sb_info *cifs_sb); int smb2_parse_symlink_response(struct cifs_sb_info *cifs_sb, @@ -282,7 +283,7 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, enum securityEnum); int smb2_parse_contexts(struct TCP_Server_Info *server, struct kvec *rsp_iov, - unsigned int *epoch, + __u16 *epoch, char *lease_key, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix); diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h index 12cbd3428a6d..52bcb55d9952 100644 --- a/fs/smb/client/trace.h +++ b/fs/smb/client/trace.h @@ -674,6 +674,7 @@ DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_eof_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_info_compound_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(set_reparse_compound_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(get_reparse_compound_enter); +DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(query_wsl_ea_compound_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter); DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter); diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h index 3c7c706c797d..3336df2ea5d4 100644 --- a/fs/smb/common/smb2pdu.h +++ b/fs/smb/common/smb2pdu.h @@ -1550,7 +1550,19 @@ struct reparse_symlink_data_buffer { __u8 PathBuffer[]; /* Variable Length */ } __packed; -/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */ +/* For IO_REPARSE_TAG_NFS - see MS-FSCC 2.1.2.6 */ +#define NFS_SPECFILE_LNK 0x00000000014B4E4C +#define NFS_SPECFILE_CHR 0x0000000000524843 +#define NFS_SPECFILE_BLK 0x00000000004B4C42 +#define NFS_SPECFILE_FIFO 0x000000004F464946 +#define NFS_SPECFILE_SOCK 0x000000004B434F53 +struct reparse_nfs_data_buffer { + __le32 ReparseTag; + __le16 ReparseDataLength; + __u16 Reserved; + __le64 InodeType; /* NFS_SPECFILE_* */ + __u8 DataBuffer[]; +} __packed; /* For IO_REPARSE_TAG_LX_SYMLINK */ struct reparse_wsl_symlink_data_buffer { diff --git a/fs/smb/server/ksmbd_netlink.h b/fs/smb/server/ksmbd_netlink.h index 3d01d9d15293..3f07a612c05b 100644 --- a/fs/smb/server/ksmbd_netlink.h +++ b/fs/smb/server/ksmbd_netlink.h @@ -111,7 +111,8 @@ struct ksmbd_startup_request { __u32 smb2_max_credits; /* MAX credits */ __u32 smbd_max_io_size; /* smbd read write size */ __u32 max_connections; /* Number of maximum simultaneous connections */ - __u32 reserved[126]; /* Reserved room */ + __s8 bind_interfaces_only; + __s8 reserved[503]; /* Reserved room */ __u32 ifc_list_sz; /* interfaces list size */ __s8 ____payload[]; }; diff --git a/fs/smb/server/server.h b/fs/smb/server/server.h index 94187628ff08..995555febe7d 100644 --- a/fs/smb/server/server.h +++ b/fs/smb/server/server.h @@ -46,6 +46,7 @@ struct ksmbd_server_config { char *conf[SERVER_CONF_WORK_GROUP + 1]; struct task_struct 
*dh_task; + bool bind_interfaces_only; }; extern struct ksmbd_server_config server_conf; diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 772deec5b90f..f1efcd027475 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -38,6 +38,7 @@ #include "mgmt/user_session.h" #include "mgmt/ksmbd_ida.h" #include "ndr.h" +#include "transport_tcp.h" static void __wbuf(struct ksmbd_work *work, void **req, void **rsp) { @@ -7759,6 +7760,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn, if (netdev->type == ARPHRD_LOOPBACK) continue; + if (!ksmbd_find_netdev_name_iface_list(netdev->name)) + continue; + flags = dev_get_flags(netdev); if (!(flags & IFF_RUNNING)) continue; diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c index f51cd0816b1a..425c756bcfb8 100644 --- a/fs/smb/server/smb_common.c +++ b/fs/smb/server/smb_common.c @@ -781,10 +781,6 @@ int __ksmbd_override_fsids(struct ksmbd_work *work, WARN_ON(work->saved_cred); work->saved_cred = override_creds(cred); - if (!work->saved_cred) { - abort_creds(cred); - return -EINVAL; - } return 0; } @@ -796,13 +792,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work) void ksmbd_revert_fsids(struct ksmbd_work *work) { const struct cred *cred; - WARN_ON(!work->saved_cred); - cred = current_cred(); - revert_creds(work->saved_cred); - put_cred(cred); + cred = revert_creds(work->saved_cred); work->saved_cred = NULL; + put_cred(cred); } __le32 smb_map_generic_desired_access(__le32 daccess) diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index befaf42b84cc..0460ebea6ff0 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -333,6 +333,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) ret = ksmbd_set_netbios_name(req->netbios_name); ret |= ksmbd_set_server_string(req->server_string); ret |= ksmbd_set_work_group(req->work_group); + server_conf.bind_interfaces_only = req->bind_interfaces_only; ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req), req->ifc_list_sz); if (ret) { @@ -626,6 +627,9 @@ ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len) struct ksmbd_spnego_authen_request *req; struct ksmbd_spnego_authen_response *resp; + if (blob_len > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) + blob_len + 1); if (!msg) @@ -805,6 +809,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; + if (payload_sz > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; @@ -853,6 +860,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; + if (payload_sz > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; @@ -871,31 +881,6 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle return resp; } -struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload, - size_t payload_sz) -{ - struct ksmbd_ipc_msg *msg; - struct ksmbd_rpc_command *req; - struct ksmbd_rpc_command *resp; - - msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); - if (!msg) - return NULL; - - msg->type = KSMBD_EVENT_RPC_REQUEST; - req = (struct 
ksmbd_rpc_command *)msg->payload; - req->handle = ksmbd_acquire_id(&ipc_ida); - req->flags = rpc_context_flags(sess); - req->flags |= KSMBD_RPC_RAP_METHOD; - req->payload_sz = payload_sz; - memcpy(req->payload, payload, payload_sz); - - resp = ipc_msg_send_request(msg, req->handle); - ipc_msg_handle_free(req->handle); - ipc_msg_free(msg); - return resp; -} - static int __ipc_heartbeat(void) { unsigned long delta; diff --git a/fs/smb/server/transport_ipc.h b/fs/smb/server/transport_ipc.h index d9b6737f8cd0..e51850f1423b 100644 --- a/fs/smb/server/transport_ipc.h +++ b/fs/smb/server/transport_ipc.h @@ -41,8 +41,6 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle struct ksmbd_rpc_command *ksmbd_rpc_read(struct ksmbd_session *sess, int handle); struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle, void *payload, size_t payload_sz); -struct ksmbd_rpc_command *ksmbd_rpc_rap(struct ksmbd_session *sess, void *payload, - size_t payload_sz); void ksmbd_ipc_release(void); void ksmbd_ipc_soft_reset(void); int ksmbd_ipc_init(void); diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c index 0d9007285e30..7f38a3c3f5bd 100644 --- a/fs/smb/server/transport_tcp.c +++ b/fs/smb/server/transport_tcp.c @@ -504,32 +504,37 @@ out_clear: return ret; } +struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name) +{ + struct interface *iface; + + list_for_each_entry(iface, &iface_list, entry) + if (!strcmp(iface->name, netdev_name)) + return iface; + return NULL; +} + static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct interface *iface; - int ret, found = 0; + int ret; switch (event) { case NETDEV_UP: if (netif_is_bridge_port(netdev)) return NOTIFY_OK; - list_for_each_entry(iface, &iface_list, entry) { - if (!strcmp(iface->name, netdev->name)) { - found = 1; - if (iface->state != IFACE_STATE_DOWN) - break; - ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n", - iface->name); - ret = create_socket(iface); - if (ret) - return NOTIFY_OK; - break; - } + iface = ksmbd_find_netdev_name_iface_list(netdev->name); + if (iface && iface->state == IFACE_STATE_DOWN) { + ksmbd_debug(CONN, "netdev-up event: netdev(%s) is going up\n", + iface->name); + ret = create_socket(iface); + if (ret) + return NOTIFY_OK; } - if (!found && bind_additional_ifaces) { + if (!iface && bind_additional_ifaces) { iface = alloc_iface(kstrdup(netdev->name, KSMBD_DEFAULT_GFP)); if (!iface) return NOTIFY_OK; @@ -541,21 +546,19 @@ static int ksmbd_netdev_event(struct notifier_block *nb, unsigned long event, } break; case NETDEV_DOWN: - list_for_each_entry(iface, &iface_list, entry) { - if (!strcmp(iface->name, netdev->name) && - iface->state == IFACE_STATE_CONFIGURED) { - ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n", - iface->name); - tcp_stop_kthread(iface->ksmbd_kthread); - iface->ksmbd_kthread = NULL; - mutex_lock(&iface->sock_release_lock); - tcp_destroy_socket(iface->ksmbd_socket); - iface->ksmbd_socket = NULL; - mutex_unlock(&iface->sock_release_lock); - - iface->state = IFACE_STATE_DOWN; - break; - } + iface = ksmbd_find_netdev_name_iface_list(netdev->name); + if (iface && iface->state == IFACE_STATE_CONFIGURED) { + ksmbd_debug(CONN, "netdev-down event: netdev(%s) is going down\n", + iface->name); + tcp_stop_kthread(iface->ksmbd_kthread); + iface->ksmbd_kthread = NULL; + mutex_lock(&iface->sock_release_lock); + 
tcp_destroy_socket(iface->ksmbd_socket); + iface->ksmbd_socket = NULL; + mutex_unlock(&iface->sock_release_lock); + + iface->state = IFACE_STATE_DOWN; + break; } break; } @@ -624,18 +627,6 @@ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz) int sz = 0; if (!ifc_list_sz) { - struct net_device *netdev; - - rtnl_lock(); - for_each_netdev(&init_net, netdev) { - if (netif_is_bridge_port(netdev)) - continue; - if (!alloc_iface(kstrdup(netdev->name, KSMBD_DEFAULT_GFP))) { - rtnl_unlock(); - return -ENOMEM; - } - } - rtnl_unlock(); bind_additional_ifaces = 1; return 0; } diff --git a/fs/smb/server/transport_tcp.h b/fs/smb/server/transport_tcp.h index e338bebe322f..8c9aa624cfe3 100644 --- a/fs/smb/server/transport_tcp.h +++ b/fs/smb/server/transport_tcp.h @@ -7,6 +7,7 @@ #define __KSMBD_TRANSPORT_TCP_H__ int ksmbd_tcp_set_interfaces(char *ifc_list, int ifc_list_sz); +struct interface *ksmbd_find_netdev_name_iface_list(char *netdev_name); int ksmbd_tcp_init(void); void ksmbd_tcp_destroy(void); diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index 40f08eac519c..6890016e1923 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -1856,13 +1856,6 @@ void ksmbd_vfs_posix_lock_wait(struct file_lock *flock) wait_event(flock->c.flc_wait, !flock->c.flc_blocker); } -int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout) -{ - return wait_event_interruptible_timeout(flock->c.flc_wait, - !flock->c.flc_blocker, - timeout); -} - void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock) { locks_delete_block(flock); diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h index 06903024a2d8..2893f59803a6 100644 --- a/fs/smb/server/vfs.h +++ b/fs/smb/server/vfs.h @@ -140,7 +140,6 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work, struct dentry *dentry, struct ksmbd_kstat *ksmbd_kstat); void ksmbd_vfs_posix_lock_wait(struct file_lock *flock); -int ksmbd_vfs_posix_lock_wait_timeout(struct file_lock *flock, long timeout); void ksmbd_vfs_posix_lock_unblock(struct file_lock *flock); int ksmbd_vfs_remove_acl_xattrs(struct mnt_idmap *idmap, const struct path *path); diff --git a/fs/splice.c b/fs/splice.c index 2898fa1e9e63..28cfa63aa236 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos, return -ENOMEM; pages = (struct page **)(bv + npages); - npages = alloc_pages_bulk_array(GFP_USER, npages, pages); + npages = alloc_pages_bulk(GFP_USER, npages, pages); if (!npages) { kfree(bv); return -ENOMEM; diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index 60fc98bdf421..b1091e70434a 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -5,8 +5,8 @@ config SQUASHFS help Saying Y here includes support for SquashFS 4.0 (a Compressed Read-Only File System). Squashfs is a highly compressed read-only - filesystem for Linux. It uses zlib, lzo or xz compression to - compress both files, inodes and directories. Inodes in the system + filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression + to compress both files, inodes and directories. Inodes in the system are very small and all blocks are packed to minimise data overhead. Block sizes greater than 4K are supported up to a maximum of 1 Mbytes (default block size 128K). SquashFS 4.0 supports 64 bit filesystems @@ -16,7 +16,7 @@ config SQUASHFS Squashfs is intended for general read-only filesystem use, for archival use (i.e. in cases where a .tar.gz file may be used), and in embedded systems where low overhead is needed. 
Further information - and tools are available from http://squashfs.sourceforge.net. + and tools are available from github.com/plougher/squashfs-tools. If you want to compile this as a module ( = code which can be inserted in and removed from the running kernel whenever you want), diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 5062326d0efb..4db0d2b0aab8 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) { int i, j; - struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); + struct squashfs_cache *cache; + if (entries == 0) + return NULL; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) { ERROR("Failed to allocate %s cache\n", name); - return NULL; + return ERR_PTR(-ENOMEM); } cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); @@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cleanup: squashfs_cache_delete(cache); - return NULL; + return ERR_PTR(-ENOMEM); } diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 21aaa96856c1..5ca2baa16dc2 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -362,29 +362,33 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return squashfs_block_size(size); } -void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) +static bool squashfs_fill_page(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t offset, + size_t avail) { - int copied; + size_t copied; void *pageaddr; - pageaddr = kmap_atomic(page); + pageaddr = kmap_local_folio(folio, 0); copied = squashfs_copy_data(pageaddr, buffer, offset, avail); memset(pageaddr + copied, 0, PAGE_SIZE - copied); - kunmap_atomic(pageaddr); + kunmap_local(pageaddr); - flush_dcache_page(page); - if (copied == avail) - SetPageUptodate(page); + flush_dcache_folio(folio); + + return copied == avail; } /* Copy data into page cache */ -void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, - int bytes, int offset) +void squashfs_copy_cache(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t bytes, + size_t offset) { - struct inode *inode = page->mapping->host; + struct address_space *mapping = folio->mapping; + struct inode *inode = mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; - int start_index = page->index & ~mask, end_index = start_index | mask; + int start_index = folio->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers @@ -394,32 +398,35 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, */ for (i = start_index; i <= end_index && bytes > 0; i++, bytes -= PAGE_SIZE, offset += PAGE_SIZE) { - struct page *push_page; - int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; + struct folio *push_folio; + size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0; + bool updated = false; - TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); + TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail); - push_page = (i == page->index) ? page : - grab_cache_page_nowait(page->mapping, i); + push_folio = (i == folio->index) ? 
folio : + __filemap_get_folio(mapping, i, + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, + mapping_gfp_mask(mapping)); - if (!push_page) + if (IS_ERR(push_folio)) continue; - if (PageUptodate(push_page)) - goto skip_page; + if (folio_test_uptodate(push_folio)) + goto skip_folio; - squashfs_fill_page(push_page, buffer, offset, avail); -skip_page: - unlock_page(push_page); - if (i != page->index) - put_page(push_page); + updated = squashfs_fill_page(push_folio, buffer, offset, avail); +skip_folio: + folio_end_read(push_folio, updated); + if (i != folio->index) + folio_put(push_folio); } } /* Read datablock stored packed inside a fragment (tail-end packed block) */ -static int squashfs_readpage_fragment(struct page *page, int expected) +static int squashfs_readpage_fragment(struct folio *folio, int expected) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); @@ -430,36 +437,34 @@ static int squashfs_readpage_fragment(struct page *page, int expected) squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(page, buffer, expected, + squashfs_copy_cache(folio, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } -static int squashfs_readpage_sparse(struct page *page, int expected) +static int squashfs_readpage_sparse(struct folio *folio, int expected) { - squashfs_copy_cache(page, NULL, expected, 0); + squashfs_copy_cache(folio, NULL, expected, 0); return 0; } static int squashfs_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int index = folio->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int expected = index == file_end ? 
(i_size_read(inode) & (msblk->block_size - 1)) : msblk->block_size; int res = 0; - void *pageaddr; TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", - page->index, squashfs_i(inode)->start); + folio->index, squashfs_i(inode)->start); - if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> + if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT)) goto out; @@ -472,23 +477,18 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) goto out; if (res == 0) - res = squashfs_readpage_sparse(page, expected); + res = squashfs_readpage_sparse(folio, expected); else - res = squashfs_readpage_block(page, block, res, expected); + res = squashfs_readpage_block(folio, block, res, expected); } else - res = squashfs_readpage_fragment(page, expected); + res = squashfs_readpage_fragment(folio, expected); if (!res) return 0; out: - pageaddr = kmap_atomic(page); - memset(pageaddr, 0, PAGE_SIZE); - kunmap_atomic(pageaddr); - flush_dcache_page(page); - if (res == 0) - SetPageUptodate(page); - unlock_page(page); + folio_zero_segment(folio, 0, folio_size(folio)); + folio_end_read(folio, res == 0); return res; } diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index 54c17b7c85fd..40e59a43d098 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -18,9 +18,9 @@ #include "squashfs.h" /* Read separately compressed datablock and memcopy into page cache */ -int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected) { - struct inode *i = page->mapping->host; + struct inode *i = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, block, bsize); int res = buffer->error; @@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(page, buffer, expected, 0); + squashfs_copy_cache(folio, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index d19d4db74af8..2c3e809d6891 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -19,12 +19,11 @@ #include "page_actor.h" /* Read separately compressed datablock directly into page cache */ -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, - int expected) - +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, + int expected) { - struct folio *folio = page_folio(target_page); - struct inode *inode = target_page->mapping->host; + struct page *target_page = &folio->page; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; @@ -48,7 +47,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, /* Try to grab all the pages covered by the Squashfs block */ for (i = 0, index = start_index; index <= end_index; index++) { page[i] = (index == folio->index) ? 
target_page : - grab_cache_page_nowait(target_page->mapping, index); + grab_cache_page_nowait(folio->mapping, index); if (page[i] == NULL) continue; diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 5a756e6790b5..218868b20f16 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -14,6 +14,12 @@ #define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args) +#ifdef CONFIG_SQUASHFS_FILE_CACHE +#define SQUASHFS_READ_PAGES msblk->max_thread_num +#else +#define SQUASHFS_READ_PAGES 0 +#endif + /* block.c */ extern int squashfs_read_data(struct super_block *, u64, int, u64 *, struct squashfs_page_actor *); @@ -67,12 +73,11 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); /* file.c */ -void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); -void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, - int); +void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *, + size_t bytes, size_t offset); /* file_xxx.c */ -extern int squashfs_readpage_block(struct page *, u64, int, int); +int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected); /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 22e812808e5c..67c55fe32ce8 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -314,26 +314,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_RDONLY; sb->s_op = &squashfs_super_ops; - err = -ENOMEM; - msblk->block_cache = squashfs_cache_init("metadata", SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); - if (msblk->block_cache == NULL) + if (IS_ERR(msblk->block_cache)) { + err = PTR_ERR(msblk->block_cache); goto failed_mount; + } /* Allocate read_page block */ msblk->read_page = squashfs_cache_init("data", - msblk->max_thread_num, msblk->block_size); - if (msblk->read_page == NULL) { + SQUASHFS_READ_PAGES, msblk->block_size); + if (IS_ERR(msblk->read_page)) { errorf(fc, "Failed to allocate read_page block"); + err = PTR_ERR(msblk->read_page); goto failed_mount; } if (msblk->devblksize == PAGE_SIZE) { struct inode *cache = new_inode(sb); - if (cache == NULL) + if (cache == NULL) { + err = -ENOMEM; goto failed_mount; + } set_nlink(cache, 1); cache->i_size = OFFSET_MAX; @@ -405,9 +408,9 @@ handle_fragments: goto check_directory_table; msblk->fragment_cache = squashfs_cache_init("fragment", - SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); - if (msblk->fragment_cache == NULL) { - err = -ENOMEM; + min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size); + if (IS_ERR(msblk->fragment_cache)) { + err = PTR_ERR(msblk->fragment_cache); goto failed_mount; } diff --git a/fs/stat.c b/fs/stat.c index 0870e969a8a0..f13308bfdc98 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -281,6 +281,8 @@ static int vfs_statx_path(struct path *path, int flags, struct kstat *stat, u32 request_mask) { int error = vfs_getattr(path, stat, request_mask, flags); + if (error) + return error; if (request_mask & STATX_MNT_ID_UNIQUE) { stat->mnt_id = real_mount(path->mnt)->mnt_id_unique; @@ -302,7 +304,7 @@ static int vfs_statx_path(struct path *path, int flags, struct kstat *stat, if (S_ISBLK(stat->mode)) bdev_statx(path, stat, request_mask); - return error; + return 0; } static int vfs_statx_fd(int fd, int flags, struct kstat *stat, @@ -725,6 +727,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_mnt_id = 
stat->mnt_id; tmp.stx_dio_mem_align = stat->dio_mem_align; tmp.stx_dio_offset_align = stat->dio_offset_align; + tmp.stx_dio_read_offset_align = stat->dio_read_offset_align; tmp.stx_subvol = stat->subvol; tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min; tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max; diff --git a/fs/super.c b/fs/super.c index c9c7223bc2a2..5a7db4a556e3 100644 --- a/fs/super.c +++ b/fs/super.c @@ -647,7 +647,7 @@ void generic_shutdown_super(struct super_block *sb) */ fscrypt_destroy_keyring(sb); - if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), + if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL, "VFS: Busy inodes after unmount of %s (%s)", sb->s_id, sb->s_type->name)) { /* diff --git a/fs/sysctls.c b/fs/sysctls.c index 8dbde9a802fa..ad429dffeb4b 100644 --- a/fs/sysctls.c +++ b/fs/sysctls.c @@ -7,7 +7,7 @@ #include <linux/init.h> #include <linux/sysctl.h> -static struct ctl_table fs_shared_sysctls[] = { +static const struct ctl_table fs_shared_sysctls[] = { { .procname = "overflowuid", .data = &fs_overflowuid, diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 785408861c01..6931308876c4 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -817,7 +817,7 @@ EXPORT_SYMBOL_GPL(sysfs_emit_at); * Returns number of bytes written to @buf. */ ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t off, size_t count) { memcpy(buf, attr->private + off, count); diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index cfc614c638da..53214499e384 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -457,7 +457,8 @@ static void tracefs_d_release(struct dentry *dentry) eventfs_d_release(dentry); } -static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags) +static int tracefs_d_revalidate(struct inode *inode, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { struct eventfs_inode *ei = dentry->d_fsdata; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 5cc69beaa62e..b01f382ce8db 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -863,7 +863,6 @@ void ubifs_dump_leb(const struct ubifs_info *c, int lnum) out: vfree(buf); - return; } void ubifs_dump_znode(const struct ubifs_info *c, @@ -946,16 +945,20 @@ void ubifs_dump_tnc(struct ubifs_info *c) pr_err("\n"); pr_err("(pid %d) start dumping TNC tree\n", current->pid); - znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); - level = znode->level; - pr_err("== Level %d ==\n", level); - while (znode) { - if (level != znode->level) { - level = znode->level; - pr_err("== Level %d ==\n", level); + if (c->zroot.znode) { + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); + level = znode->level; + pr_err("== Level %d ==\n", level); + while (znode) { + if (level != znode->level) { + level = znode->level; + pr_err("== Level %d ==\n", level); + } + ubifs_dump_znode(c, znode); + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); } - ubifs_dump_znode(c, znode); - znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); + } else { + pr_err("empty TNC tree in memory\n"); } pr_err("(pid %d) finish dumping TNC tree\n", current->pid); } diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c index aa8837e6247c..f2cb214581fd 100644 --- a/fs/ubifs/lpt_commit.c +++ b/fs/ubifs/lpt_commit.c @@ -1932,7 +1932,6 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum) pr_err("(pid %d) finish dumping LEB %d\n", 
current->pid, lnum); out: vfree(buf); - return; } /** diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 7c0bd0b55f88..97c4d71115d8 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -36,7 +36,7 @@ static int sysctl_unprivileged_userfaultfd __read_mostly; #ifdef CONFIG_SYSCTL -static struct ctl_table vm_userfaultfd_table[] = { +static const struct ctl_table vm_userfaultfd_table[] = { { .procname = "unprivileged_userfaultfd", .data = &sysctl_unprivileged_userfaultfd, diff --git a/fs/vboxsf/Kconfig b/fs/vboxsf/Kconfig index b84586ae08b3..d4694026db8b 100644 --- a/fs/vboxsf/Kconfig +++ b/fs/vboxsf/Kconfig @@ -1,6 +1,6 @@ config VBOXSF_FS tristate "VirtualBox guest shared folder (vboxsf) support" - depends on X86 && VBOXGUEST + depends on (ARM64 || X86) && VBOXGUEST select NLS help VirtualBox hosts can share folders with guests, this driver diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index 5f1a14d5b927..a859ac9b74ba 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -192,7 +192,8 @@ const struct file_operations vboxsf_dir_fops = { * This is called during name resolution/lookup to check if the @dentry in * the cache is still valid. the job is handled by vboxsf_inode_revalidate. */ -static int vboxsf_dentry_revalidate(struct dentry *dentry, unsigned int flags) +static int vboxsf_dentry_revalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { if (flags & LOOKUP_RCU) return -ECHILD; diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index e95b8a48d8a0..1d94bb784108 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -21,7 +21,8 @@ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ -static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375"; +static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376', + '\375' }; static int follow_symlinks; module_param(follow_symlinks, int, 0444); diff --git a/fs/verity/init.c b/fs/verity/init.c index f440f0e61e3e..6e8d33b50240 100644 --- a/fs/verity/init.c +++ b/fs/verity/init.c @@ -10,7 +10,7 @@ #include <linux/ratelimit.h> #ifdef CONFIG_SYSCTL -static struct ctl_table fsverity_sysctl_table[] = { +static const struct ctl_table fsverity_sysctl_table[] = { #ifdef CONFIG_FS_VERITY_BUILTIN_SIGNATURES { .procname = "require_signatures", diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index ed9b0dabc1f1..7afa51e41427 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -51,6 +51,8 @@ xfs-y += $(addprefix libxfs/, \ xfs_rmap_btree.o \ xfs_refcount.o \ xfs_refcount_btree.o \ + xfs_rtrefcount_btree.o \ + xfs_rtrmap_btree.o \ xfs_sb.o \ xfs_symlink_remote.o \ xfs_trans_inode.o \ @@ -193,6 +195,8 @@ xfs-$(CONFIG_XFS_ONLINE_SCRUB_STATS) += scrub/stats.o xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \ rgsuper.o \ rtbitmap.o \ + rtrefcount.o \ + rtrmap.o \ rtsummary.o \ ) @@ -232,6 +236,8 @@ xfs-y += $(addprefix scrub/, \ xfs-$(CONFIG_XFS_RT) += $(addprefix scrub/, \ rtbitmap_repair.o \ + rtrefcount_repair.o \ + rtrmap_repair.o \ rtsummary_repair.o \ ) diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index f5d853089019..fb79215a509d 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -114,6 +114,7 @@ xfs_ag_resv_needed( case XFS_AG_RESV_RMAPBT: len -= xfs_perag_resv(pag, type)->ar_reserved; break; + case XFS_AG_RESV_METAFILE: case XFS_AG_RESV_NONE: /* empty */ break; @@ -347,6 +348,7 @@ xfs_ag_resv_alloc_extent( switch (type) { case XFS_AG_RESV_AGFL: + case XFS_AG_RESV_METAFILE: return; case XFS_AG_RESV_METADATA: case 
XFS_AG_RESV_RMAPBT: @@ -389,6 +391,7 @@ xfs_ag_resv_free_extent( switch (type) { case XFS_AG_RESV_AGFL: + case XFS_AG_RESV_METAFILE: return; case XFS_AG_RESV_METADATA: case XFS_AG_RESV_RMAPBT: diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index 17875ad865f5..8c04acd30d48 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -1004,9 +1004,7 @@ xfs_attr_add_fork( unsigned int blks; /* space reservation */ int error; /* error return value */ - if (xfs_is_metadir_inode(ip)) - ASSERT(XFS_IS_DQDETACHED(ip)); - else + if (!xfs_is_metadir_inode(ip)) ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); blks = XFS_ADDAFORK_SPACE_RES(mp); diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 5255f93bae31..0ef19f1469ec 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -615,7 +615,7 @@ xfs_bmap_btree_to_extents( xfs_trans_binval(tp, cbp); if (cur->bc_levels[0].bp == cbp) cur->bc_levels[0].bp = NULL; - xfs_iroot_realloc(ip, -1, whichfork); + xfs_bmap_broot_realloc(ip, whichfork, 0); ASSERT(ifp->if_broot == NULL); ifp->if_format = XFS_DINODE_FMT_EXTENTS; *logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork); @@ -659,12 +659,11 @@ xfs_bmap_extents_to_btree( * Make space in the inode incore. This needs to be undone if we fail * to expand the root. */ - xfs_iroot_realloc(ip, 1, whichfork); + block = xfs_bmap_broot_realloc(ip, whichfork, 1); /* * Fill in the root. */ - block = ifp->if_broot; xfs_bmbt_init_block(ip, block, NULL, 1, 1); /* * Need a cursor. Can't allocate until bb_level is filled in. @@ -746,7 +745,7 @@ xfs_bmap_extents_to_btree( out_unreserve_dquot: xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); out_root_realloc: - xfs_iroot_realloc(ip, -1, whichfork); + xfs_bmap_broot_realloc(ip, whichfork, 0); ifp->if_format = XFS_DINODE_FMT_EXTENTS; ASSERT(ifp->if_broot == NULL); xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); @@ -1043,9 +1042,7 @@ xfs_bmap_add_attrfork( int error; /* error return value */ xfs_assert_ilocked(ip, XFS_ILOCK_EXCL); - if (xfs_is_metadir_inode(ip)) - ASSERT(XFS_IS_DQDETACHED(ip)); - else + if (!xfs_is_metadir_inode(ip)) ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); ASSERT(!xfs_inode_has_attr_fork(ip)); @@ -3566,12 +3563,12 @@ xfs_bmap_btalloc_at_eof( int error; /* - * If there are already extents in the file, try an exact EOF block - * allocation to extend the file as a contiguous extent. If that fails, - * or it's the first allocation in a file, just try for a stripe aligned - * allocation. + * If there are already extents in the file, and xfs_bmap_adjacent() has + * given a better blkno, try an exact EOF block allocation to extend the + * file as a contiguous extent. If that fails, or it's the first + * allocation in a file, just try for a stripe aligned allocation. */ - if (ap->offset) { + if (ap->eof) { xfs_extlen_t nextminlen = 0; /* @@ -3739,7 +3736,8 @@ xfs_bmap_btalloc_best_length( int error; ap->blkno = XFS_INO_TO_FSB(args->mp, ap->ip->i_ino); - xfs_bmap_adjacent(ap); + if (!xfs_bmap_adjacent(ap)) + ap->eof = false; /* * Search for an allocation group with a single extent large enough for @@ -4567,8 +4565,9 @@ xfs_bmapi_write( * the refcount btree for orphan recovery. */ if (whichfork == XFS_COW_FORK) - xfs_refcount_alloc_cow_extent(tp, bma.blkno, - bma.length); + xfs_refcount_alloc_cow_extent(tp, + XFS_IS_REALTIME_INODE(ip), + bma.blkno, bma.length); } /* Deal with the allocated space we found. 
*/ @@ -4743,7 +4742,8 @@ xfs_bmapi_convert_one_delalloc( *seq = READ_ONCE(ifp->if_seq); if (whichfork == XFS_COW_FORK) - xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length); + xfs_refcount_alloc_cow_extent(tp, XFS_IS_REALTIME_INODE(ip), + bma.blkno, bma.length); error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, whichfork); @@ -5391,7 +5391,7 @@ xfs_bmap_del_extent_real( bool isrt = xfs_ifork_is_realtime(ip, whichfork); if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { - xfs_refcount_decrease_extent(tp, del); + xfs_refcount_decrease_extent(tp, isrt, del); } else if (isrt && !xfs_has_rtgroups(mp)) { error = xfs_bmap_free_rtblocks(tp, del); } else { @@ -6501,9 +6501,8 @@ xfs_get_extsz_hint( * No point in aligning allocations if we need to COW to actually * write to them. */ - if (xfs_is_always_cow_inode(ip)) - return 0; - if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize) + if (!xfs_is_always_cow_inode(ip) && + (ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize) return ip->i_extsize; if (XFS_IS_REALTIME_INODE(ip) && ip->i_mount->m_sb.sb_rextsize > 1) @@ -6526,7 +6525,13 @@ xfs_get_cowextsz_hint( a = 0; if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) a = ip->i_cowextsize; - b = xfs_get_extsz_hint(ip); + if (XFS_IS_REALTIME_INODE(ip)) { + b = 0; + if (ip->i_diflags & XFS_DIFLAG_EXTSIZE) + b = ip->i_extsize; + } else { + b = xfs_get_extsz_hint(ip); + } a = max(a, b); if (a == 0) diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 3464be771f95..908d7b050e9c 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -516,6 +516,116 @@ xfs_bmbt_keys_contiguous( be64_to_cpu(key2->bmbt.br_startoff)); } +static inline void +xfs_bmbt_move_ptrs( + struct xfs_mount *mp, + struct xfs_btree_block *broot, + short old_size, + size_t new_size, + unsigned int numrecs) +{ + void *dptr; + void *sptr; + + sptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, old_size); + dptr = xfs_bmap_broot_ptr_addr(mp, broot, 1, new_size); + memmove(dptr, sptr, numrecs * sizeof(xfs_bmbt_ptr_t)); +} + +/* + * Reallocate the space for if_broot based on the number of records. Move the + * records and pointers in if_broot to fit the new size. When shrinking this + * will eliminate holes between the records and pointers created by the caller. + * When growing this will create holes to be filled in by the caller. + * + * The caller must not request to add more records than would fit in the + * on-disk inode root. If the if_broot is currently NULL, then if we are + * adding records, one will be allocated. The caller must also not request + * that the number of records go below zero, although it can go to zero. + * + * ip -- the inode whose if_broot area is changing + * whichfork -- which inode fork to change + * new_numrecs -- the new number of records requested for the if_broot array + * + * Returns the incore btree root block. + */ +struct xfs_btree_block * +xfs_bmap_broot_realloc( + struct xfs_inode *ip, + int whichfork, + unsigned int new_numrecs) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); + struct xfs_btree_block *broot; + unsigned int new_size; + unsigned int old_size = ifp->if_broot_bytes; + + /* + * Block mapping btrees do not support storing zero records; if this + * happens, the fork is being changed to FMT_EXTENTS. Free the broot + * and get out. 
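A note on the xfs_get_extsz_hint()/xfs_get_cowextsz_hint() hunks above: for realtime inodes the CoW hint now falls back only to an explicitly set per-inode extent size hint, never to the rt allocation unit. The userspace sketch below models that selection order; the struct, the function names and the 32-block stand-in for XFS_DEFAULT_COWEXTSZ_HINT are illustrative assumptions, not the kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct hint_in {
	bool		realtime;	/* XFS_DIFLAG_REALTIME */
	bool		extsize_flag;	/* XFS_DIFLAG_EXTSIZE */
	bool		cowextsize_flag;/* XFS_DIFLAG2_COWEXTSIZE */
	bool		always_cow;
	uint32_t	extsize;	/* blocks */
	uint32_t	cowextsize;	/* blocks */
	uint32_t	rextsize;	/* rt extent size, blocks */
};

/* models xfs_get_extsz_hint() after the change */
static uint32_t extsz_hint(const struct hint_in *ip)
{
	if (!ip->always_cow && ip->extsize_flag && ip->extsize)
		return ip->extsize;
	if (ip->realtime && ip->rextsize > 1)
		return ip->rextsize;
	return 0;
}

/* models xfs_get_cowextsz_hint() after the change */
static uint32_t cowextsz_hint(const struct hint_in *ip)
{
	uint32_t a = ip->cowextsize_flag ? ip->cowextsize : 0;
	uint32_t b;

	if (ip->realtime)
		b = ip->extsize_flag ? ip->extsize : 0;
	else
		b = extsz_hint(ip);
	if (b > a)
		a = b;
	return a ? a : 32;	/* assumed XFS_DEFAULT_COWEXTSZ_HINT */
}

int main(void)
{
	struct hint_in rt = { .realtime = true, .rextsize = 8 };

	/* an rt inode with no hints gets the default, not sb_rextsize */
	printf("cow hint = %u blocks\n", cowextsz_hint(&rt));
	return 0;
}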
+ */ + if (new_numrecs == 0) + return xfs_broot_realloc(ifp, 0); + + new_size = xfs_bmap_broot_space_calc(mp, new_numrecs); + + /* Handle the nop case quietly. */ + if (new_size == old_size) + return ifp->if_broot; + + if (new_size > old_size) { + unsigned int old_numrecs; + + /* + * If there wasn't any memory allocated before, just + * allocate it now and get out. + */ + if (old_size == 0) + return xfs_broot_realloc(ifp, new_size); + + /* + * If there is already an existing if_broot, then we need + * to realloc() it and shift the pointers to their new + * location. The records don't change location because + * they are kept butted up against the btree block header. + */ + old_numrecs = xfs_bmbt_maxrecs(mp, old_size, false); + broot = xfs_broot_realloc(ifp, new_size); + ASSERT(xfs_bmap_bmdr_space(broot) <= + xfs_inode_fork_size(ip, whichfork)); + xfs_bmbt_move_ptrs(mp, broot, old_size, new_size, old_numrecs); + return broot; + } + + /* + * We're reducing, but not totally eliminating, numrecs. In this case, + * we are shrinking the if_broot buffer, so it must already exist. + */ + ASSERT(ifp->if_broot != NULL && old_size > 0 && new_size > 0); + + /* + * Shrink the btree root by moving the bmbt pointers, since they are + * not butted up against the btree block header, then reallocating + * broot. + */ + xfs_bmbt_move_ptrs(mp, ifp->if_broot, old_size, new_size, new_numrecs); + broot = xfs_broot_realloc(ifp, new_size); + ASSERT(xfs_bmap_bmdr_space(broot) <= + xfs_inode_fork_size(ip, whichfork)); + return broot; +} + +static struct xfs_btree_block * +xfs_bmbt_broot_realloc( + struct xfs_btree_cur *cur, + unsigned int new_numrecs) +{ + return xfs_bmap_broot_realloc(cur->bc_ino.ip, cur->bc_ino.whichfork, + new_numrecs); +} + const struct xfs_btree_ops xfs_bmbt_ops = { .name = "bmap", .type = XFS_BTREE_TYPE_INODE, @@ -543,6 +653,7 @@ const struct xfs_btree_ops xfs_bmbt_ops = { .keys_inorder = xfs_bmbt_keys_inorder, .recs_inorder = xfs_bmbt_recs_inorder, .keys_contiguous = xfs_bmbt_keys_contiguous, + .broot_realloc = xfs_bmbt_broot_realloc, }; /* diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h index 49a3bae3f6ec..b238d559ab03 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.h +++ b/fs/xfs/libxfs/xfs_bmap_btree.h @@ -198,4 +198,7 @@ xfs_bmap_bmdr_space(struct xfs_btree_block *bb) return xfs_bmdr_space_calc(be16_to_cpu(bb->bb_numrecs)); } +struct xfs_btree_block *xfs_bmap_broot_realloc(struct xfs_inode *ip, + int whichfork, unsigned int new_numrecs); + #endif /* __XFS_BMAP_BTREE_H__ */ diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 68ee1c299c25..299ce7fd11b0 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -30,6 +30,12 @@ #include "xfs_health.h" #include "xfs_buf_mem.h" #include "xfs_btree_mem.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_bmap.h" +#include "xfs_rmap.h" +#include "xfs_quota.h" +#include "xfs_metafile.h" +#include "xfs_rtrefcount_btree.h" /* * Btree magic numbers. @@ -1537,12 +1543,16 @@ xfs_btree_log_recs( int first, int last) { + if (!bp) { + xfs_trans_log_inode(cur->bc_tp, cur->bc_ino.ip, + xfs_ilog_fbroot(cur->bc_ino.whichfork)); + return; + } xfs_trans_buf_set_type(cur->bc_tp, bp, XFS_BLFT_BTREE_BUF); xfs_trans_log_buf(cur->bc_tp, bp, xfs_btree_rec_offset(cur, first), xfs_btree_rec_offset(cur, last + 1) - 1); - } /* @@ -3078,6 +3088,131 @@ xfs_btree_split( #define xfs_btree_split __xfs_btree_split #endif /* __KERNEL__ */ +/* Move the records from a root leaf block to a separate block. 
*/ +STATIC void +xfs_btree_promote_leaf_iroot( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + struct xfs_buf *cbp, + union xfs_btree_ptr *cptr, + struct xfs_btree_block *cblock) +{ + union xfs_btree_rec *rp; + union xfs_btree_rec *crp; + union xfs_btree_key *kp; + union xfs_btree_ptr *pp; + struct xfs_btree_block *broot; + int numrecs = xfs_btree_get_numrecs(block); + + /* Copy the records from the leaf broot into the new child block. */ + rp = xfs_btree_rec_addr(cur, 1, block); + crp = xfs_btree_rec_addr(cur, 1, cblock); + xfs_btree_copy_recs(cur, crp, rp, numrecs); + + /* + * Increment the tree height. + * + * Trickery here: The amount of memory that we need per record for the + * ifork's btree root block may change when we convert the broot from a + * leaf to a node block. Free the existing leaf broot so that nobody + * thinks we need to migrate node pointers when we realloc the broot + * buffer after bumping nlevels. + */ + cur->bc_ops->broot_realloc(cur, 0); + cur->bc_nlevels++; + cur->bc_levels[1].ptr = 1; + + /* + * Allocate a new node broot and initialize it to point to the new + * child block. + */ + broot = cur->bc_ops->broot_realloc(cur, 1); + xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, + cur->bc_nlevels - 1, 1, cur->bc_ino.ip->i_ino); + + pp = xfs_btree_ptr_addr(cur, 1, broot); + kp = xfs_btree_key_addr(cur, 1, broot); + xfs_btree_copy_ptrs(cur, pp, cptr, 1); + xfs_btree_get_keys(cur, cblock, kp); + + /* Attach the new block to the cursor and log it. */ + xfs_btree_setbuf(cur, 0, cbp); + xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); + xfs_btree_log_recs(cur, cbp, 1, numrecs); +} + +/* + * Move the keys and pointers from a root block to a separate block. + * + * Since the keyptr size does not change, all we have to do is increase the + * tree height, copy the keyptrs to the new internal node (cblock), shrink + * the root, and copy the pointers there. + */ +STATIC int +xfs_btree_promote_node_iroot( + struct xfs_btree_cur *cur, + struct xfs_btree_block *block, + int level, + struct xfs_buf *cbp, + union xfs_btree_ptr *cptr, + struct xfs_btree_block *cblock) +{ + union xfs_btree_key *ckp; + union xfs_btree_key *kp; + union xfs_btree_ptr *cpp; + union xfs_btree_ptr *pp; + int i; + int error; + int numrecs = xfs_btree_get_numrecs(block); + + /* + * Increase tree height, adjusting the root block level to match. + * We cannot change the root btree node size until we've copied the + * block contents to the new child block. + */ + be16_add_cpu(&block->bb_level, 1); + cur->bc_nlevels++; + cur->bc_levels[level + 1].ptr = 1; + + /* + * Adjust the root btree record count, then copy the keys from the old + * root to the new child block. + */ + xfs_btree_set_numrecs(block, 1); + kp = xfs_btree_key_addr(cur, 1, block); + ckp = xfs_btree_key_addr(cur, 1, cblock); + xfs_btree_copy_keys(cur, ckp, kp, numrecs); + + /* Check the pointers and copy them to the new child block. */ + pp = xfs_btree_ptr_addr(cur, 1, block); + cpp = xfs_btree_ptr_addr(cur, 1, cblock); + for (i = 0; i < numrecs; i++) { + error = xfs_btree_debug_check_ptr(cur, pp, i, level); + if (error) + return error; + } + xfs_btree_copy_ptrs(cur, cpp, pp, numrecs); + + /* + * Set the first keyptr to point to the new child block, then shrink + * the memory buffer for the root block. 
+ */ + error = xfs_btree_debug_check_ptr(cur, cptr, 0, level); + if (error) + return error; + xfs_btree_copy_ptrs(cur, pp, cptr, 1); + xfs_btree_get_keys(cur, cblock, kp); + + cur->bc_ops->broot_realloc(cur, 1); + + /* Attach the new block to the cursor and log it. */ + xfs_btree_setbuf(cur, level, cbp); + xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); + xfs_btree_log_keys(cur, cbp, 1, numrecs); + xfs_btree_log_ptrs(cur, cbp, 1, numrecs); + return 0; +} + /* * Copy the old inode root contents into a real block and make the * broot point to it. @@ -3091,14 +3226,10 @@ xfs_btree_new_iroot( struct xfs_buf *cbp; /* buffer for cblock */ struct xfs_btree_block *block; /* btree block */ struct xfs_btree_block *cblock; /* child btree block */ - union xfs_btree_key *ckp; /* child key pointer */ - union xfs_btree_ptr *cpp; /* child ptr pointer */ - union xfs_btree_key *kp; /* pointer to btree key */ - union xfs_btree_ptr *pp; /* pointer to block addr */ + union xfs_btree_ptr aptr; union xfs_btree_ptr nptr; /* new block addr */ int level; /* btree level */ int error; /* error return code */ - int i; /* loop counter */ XFS_BTREE_STATS_INC(cur, newroot); @@ -3107,10 +3238,15 @@ xfs_btree_new_iroot( level = cur->bc_nlevels - 1; block = xfs_btree_get_iroot(cur); - pp = xfs_btree_ptr_addr(cur, 1, block); + ASSERT(level > 0 || (cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)); + if (level > 0) + aptr = *xfs_btree_ptr_addr(cur, 1, block); + else + aptr.l = cpu_to_be64(XFS_INO_TO_FSB(cur->bc_mp, + cur->bc_ino.ip->i_ino)); /* Allocate the new block. If we can't do it, we're toast. Give up. */ - error = xfs_btree_alloc_block(cur, pp, &nptr, stat); + error = xfs_btree_alloc_block(cur, &aptr, &nptr, stat); if (error) goto error0; if (*stat == 0) @@ -3136,47 +3272,16 @@ xfs_btree_new_iroot( cblock->bb_u.s.bb_blkno = bno; } - be16_add_cpu(&block->bb_level, 1); - xfs_btree_set_numrecs(block, 1); - cur->bc_nlevels++; - ASSERT(cur->bc_nlevels <= cur->bc_maxlevels); - cur->bc_levels[level + 1].ptr = 1; - - kp = xfs_btree_key_addr(cur, 1, block); - ckp = xfs_btree_key_addr(cur, 1, cblock); - xfs_btree_copy_keys(cur, ckp, kp, xfs_btree_get_numrecs(cblock)); - - cpp = xfs_btree_ptr_addr(cur, 1, cblock); - for (i = 0; i < be16_to_cpu(cblock->bb_numrecs); i++) { - error = xfs_btree_debug_check_ptr(cur, pp, i, level); + if (level > 0) { + error = xfs_btree_promote_node_iroot(cur, block, level, cbp, + &nptr, cblock); if (error) goto error0; + } else { + xfs_btree_promote_leaf_iroot(cur, block, cbp, &nptr, cblock); } - xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); - - error = xfs_btree_debug_check_ptr(cur, &nptr, 0, level); - if (error) - goto error0; - - xfs_btree_copy_ptrs(cur, pp, &nptr, 1); - - xfs_iroot_realloc(cur->bc_ino.ip, - 1 - xfs_btree_get_numrecs(cblock), - cur->bc_ino.whichfork); - - xfs_btree_setbuf(cur, level, cbp); - - /* - * Do all this logging at the end so that - * the root is at the right level. - */ - xfs_btree_log_block(cur, cbp, XFS_BB_ALL_BITS); - xfs_btree_log_keys(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); - xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs)); - - *logflags |= - XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); + *logflags |= XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork); *stat = 1; return 0; error0: @@ -3347,7 +3452,7 @@ xfs_btree_make_block_unfull( if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) { /* A root block that can be made bigger. 
*/ - xfs_iroot_realloc(ip, 1, cur->bc_ino.whichfork); + cur->bc_ops->broot_realloc(cur, numrecs + 1); *stat = 1; } else { /* A root block that needs replacing */ @@ -3693,6 +3798,97 @@ error0: return error; } +/* Move the records from a child leaf block to the root block. */ +STATIC void +xfs_btree_demote_leaf_child( + struct xfs_btree_cur *cur, + struct xfs_btree_block *cblock, + int numrecs) +{ + union xfs_btree_rec *rp; + union xfs_btree_rec *crp; + struct xfs_btree_block *broot; + + /* + * Decrease the tree height. + * + * Trickery here: The amount of memory that we need per record for the + * ifork's btree root block may change when we convert the broot from a + * node to a leaf. Free the old node broot so that we can get a fresh + * leaf broot. + */ + cur->bc_ops->broot_realloc(cur, 0); + cur->bc_nlevels--; + + /* + * Allocate a new leaf broot and copy the records from the old child. + * Detach the old child from the cursor. + */ + broot = cur->bc_ops->broot_realloc(cur, numrecs); + xfs_btree_init_block(cur->bc_mp, broot, cur->bc_ops, 0, numrecs, + cur->bc_ino.ip->i_ino); + + rp = xfs_btree_rec_addr(cur, 1, broot); + crp = xfs_btree_rec_addr(cur, 1, cblock); + xfs_btree_copy_recs(cur, rp, crp, numrecs); + + cur->bc_levels[0].bp = NULL; +} + +/* + * Move the keyptrs from a child node block to the root block. + * + * Since the keyptr size does not change, all we have to do is increase the + * tree height, copy the keyptrs to the new internal node (cblock), shrink + * the root, and copy the pointers there. + */ +STATIC int +xfs_btree_demote_node_child( + struct xfs_btree_cur *cur, + struct xfs_btree_block *cblock, + int level, + int numrecs) +{ + struct xfs_btree_block *block; + union xfs_btree_key *ckp; + union xfs_btree_key *kp; + union xfs_btree_ptr *cpp; + union xfs_btree_ptr *pp; + int i; + int error; + + /* + * Adjust the root btree node size and the record count to match the + * doomed child so that we can copy the keyptrs ahead of changing the + * tree shape. + */ + block = cur->bc_ops->broot_realloc(cur, numrecs); + + xfs_btree_set_numrecs(block, numrecs); + ASSERT(block->bb_numrecs == cblock->bb_numrecs); + + /* Copy keys from the doomed block. */ + kp = xfs_btree_key_addr(cur, 1, block); + ckp = xfs_btree_key_addr(cur, 1, cblock); + xfs_btree_copy_keys(cur, kp, ckp, numrecs); + + /* Copy pointers from the doomed block. */ + pp = xfs_btree_ptr_addr(cur, 1, block); + cpp = xfs_btree_ptr_addr(cur, 1, cblock); + for (i = 0; i < numrecs; i++) { + error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); + if (error) + return error; + } + xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); + + /* Decrease tree height, adjusting the root block level to match. */ + cur->bc_levels[level - 1].bp = NULL; + be16_add_cpu(&block->bb_level, -1); + cur->bc_nlevels--; + return 0; +} + /* * Try to merge a non-leaf block back into the inode root. 
* @@ -3705,34 +3901,31 @@ STATIC int xfs_btree_kill_iroot( struct xfs_btree_cur *cur) { - int whichfork = cur->bc_ino.whichfork; struct xfs_inode *ip = cur->bc_ino.ip; - struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_btree_block *block; struct xfs_btree_block *cblock; - union xfs_btree_key *kp; - union xfs_btree_key *ckp; - union xfs_btree_ptr *pp; - union xfs_btree_ptr *cpp; struct xfs_buf *cbp; int level; - int index; int numrecs; int error; #ifdef DEBUG union xfs_btree_ptr ptr; #endif - int i; ASSERT(cur->bc_ops->type == XFS_BTREE_TYPE_INODE); - ASSERT(cur->bc_nlevels > 1); + ASSERT((cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS) || + cur->bc_nlevels > 1); /* * Don't deal with the root block needs to be a leaf case. * We're just going to turn the thing back into extents anyway. */ level = cur->bc_nlevels - 1; - if (level == 1) + if (level == 1 && !(cur->bc_ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) + goto out0; + + /* If we're already a leaf, jump out. */ + if (level == 0) goto out0; /* @@ -3762,40 +3955,20 @@ xfs_btree_kill_iroot( ASSERT(xfs_btree_ptr_is_null(cur, &ptr)); #endif - index = numrecs - cur->bc_ops->get_maxrecs(cur, level); - if (index) { - xfs_iroot_realloc(cur->bc_ino.ip, index, - cur->bc_ino.whichfork); - block = ifp->if_broot; - } - - be16_add_cpu(&block->bb_numrecs, index); - ASSERT(block->bb_numrecs == cblock->bb_numrecs); - - kp = xfs_btree_key_addr(cur, 1, block); - ckp = xfs_btree_key_addr(cur, 1, cblock); - xfs_btree_copy_keys(cur, kp, ckp, numrecs); - - pp = xfs_btree_ptr_addr(cur, 1, block); - cpp = xfs_btree_ptr_addr(cur, 1, cblock); - - for (i = 0; i < numrecs; i++) { - error = xfs_btree_debug_check_ptr(cur, cpp, i, level - 1); + if (level > 1) { + error = xfs_btree_demote_node_child(cur, cblock, level, + numrecs); if (error) return error; - } - - xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); + } else + xfs_btree_demote_leaf_child(cur, cblock, numrecs); error = xfs_btree_free_block(cur, cbp); if (error) return error; - cur->bc_levels[level - 1].bp = NULL; - be16_add_cpu(&block->bb_level, -1); xfs_trans_log_inode(cur->bc_tp, ip, XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_ino.whichfork)); - cur->bc_nlevels--; out0: return 0; } @@ -3949,10 +4122,10 @@ xfs_btree_delrec( /* * We're at the root level. First, shrink the root block in-memory. * Try to get rid of the next level down. If we can't then there's - * nothing left to do. + * nothing left to do. numrecs was decremented above. */ if (xfs_btree_at_iroot(cur, level)) { - xfs_iroot_realloc(cur->bc_ino.ip, -1, cur->bc_ino.whichfork); + cur->bc_ops->broot_realloc(cur, numrecs); error = xfs_btree_kill_iroot(cur); if (error) @@ -5360,6 +5533,12 @@ xfs_btree_init_cur_caches(void) error = xfs_refcountbt_init_cur_cache(); if (error) goto err; + error = xfs_rtrmapbt_init_cur_cache(); + if (error) + goto err; + error = xfs_rtrefcountbt_init_cur_cache(); + if (error) + goto err; return 0; err: @@ -5376,6 +5555,8 @@ xfs_btree_destroy_cur_caches(void) xfs_bmbt_destroy_cur_cache(); xfs_rmapbt_destroy_cur_cache(); xfs_refcountbt_destroy_cur_cache(); + xfs_rtrmapbt_destroy_cur_cache(); + xfs_rtrefcountbt_destroy_cur_cache(); } /* Move the btree cursor before the first record. */ @@ -5404,3 +5585,67 @@ xfs_btree_goto_left_edge( return 0; } + +/* Allocate a block for an inode-rooted metadata btree. 
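The promotion and demotion helpers above factor the iroot shape changes so that btrees whose root block may hold records (the new XFS_BTGEO_IROOT_RECORDS case, used by the rt rmap and rt refcount btrees) share code with the record-free bmbt root. The toy userspace model below shows only the leaf-promotion step: every record moves from the incore root into a fresh child block and the root turns into a one-keyptr node; xfs_btree_demote_leaf_child() is the mirror image. The block pool and all names here are invented for illustration and are not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAXRECS	4

struct rec {
	uint32_t	key;
	uint32_t	val;
};

struct block {
	int		level;		/* 0 = leaf */
	int		numrecs;
	struct rec	recs[MAXRECS];	/* leaf payload */
	uint32_t	keys[MAXRECS];	/* node payload */
	int		ptrs[MAXRECS];	/* node payload: child indices */
};

static struct block	blocks[8];	/* fake block pool; index 0 is the "iroot" */
static int		next_blk = 1;

/* Move all root records to a new child, then turn the root into a node. */
static void promote_leaf_root(void)
{
	struct block	*root = &blocks[0];
	int		cidx = next_blk++;
	struct block	*child = &blocks[cidx];

	child->level = 0;
	child->numrecs = root->numrecs;
	memcpy(child->recs, root->recs, sizeof(root->recs));

	memset(root, 0, sizeof(*root));
	root->level = 1;
	root->numrecs = 1;
	root->keys[0] = child->recs[0].key;	/* low key of the child */
	root->ptrs[0] = cidx;
}

int main(void)
{
	struct block	*root = &blocks[0];

	for (int i = 0; i < MAXRECS; i++)
		root->recs[i] = (struct rec){ .key = 10 * (i + 1), .val = i };
	root->numrecs = MAXRECS;

	promote_leaf_root();
	printf("root level %d, key[0]=%u -> block %d holding %d recs\n",
	       root->level, (unsigned)root->keys[0], root->ptrs[0],
	       blocks[root->ptrs[0]].numrecs);
	return 0;
}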
*/ +int +xfs_btree_alloc_metafile_block( + struct xfs_btree_cur *cur, + const union xfs_btree_ptr *start, + union xfs_btree_ptr *new, + int *stat) +{ + struct xfs_alloc_arg args = { + .mp = cur->bc_mp, + .tp = cur->bc_tp, + .resv = XFS_AG_RESV_METAFILE, + .minlen = 1, + .maxlen = 1, + .prod = 1, + }; + struct xfs_inode *ip = cur->bc_ino.ip; + int error; + + ASSERT(xfs_is_metadir_inode(ip)); + + xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, cur->bc_ino.whichfork); + error = xfs_alloc_vextent_start_ag(&args, + XFS_INO_TO_FSB(cur->bc_mp, ip->i_ino)); + if (error) + return error; + if (args.fsbno == NULLFSBLOCK) { + *stat = 0; + return 0; + } + ASSERT(args.len == 1); + + xfs_metafile_resv_alloc_space(ip, &args); + + new->l = cpu_to_be64(args.fsbno); + *stat = 1; + return 0; +} + +/* Free a block from an inode-rooted metadata btree. */ +int +xfs_btree_free_metafile_block( + struct xfs_btree_cur *cur, + struct xfs_buf *bp) +{ + struct xfs_owner_info oinfo; + struct xfs_mount *mp = cur->bc_mp; + struct xfs_inode *ip = cur->bc_ino.ip; + struct xfs_trans *tp = cur->bc_tp; + xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); + int error; + + ASSERT(xfs_is_metadir_inode(ip)); + + xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); + error = xfs_free_extent_later(tp, fsbno, 1, &oinfo, XFS_AG_RESV_METAFILE, + 0); + if (error) + return error; + + xfs_metafile_resv_free_space(ip, tp, 1); + return 0; +} diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h index c5bff273cae2..355b304696e6 100644 --- a/fs/xfs/libxfs/xfs_btree.h +++ b/fs/xfs/libxfs/xfs_btree.h @@ -135,7 +135,7 @@ struct xfs_btree_ops { /* offset of btree stats array */ unsigned int statoff; - /* sick mask for health reporting (only for XFS_BTREE_TYPE_AG) */ + /* sick mask for health reporting (not for bmap btrees) */ unsigned int sick_mask; /* cursor operations */ @@ -213,11 +213,27 @@ struct xfs_btree_ops { const union xfs_btree_key *key1, const union xfs_btree_key *key2, const union xfs_btree_key *mask); + + /* + * Reallocate the space for if_broot to fit the number of records. + * Move the records and pointers in if_broot to fit the new size. When + * shrinking this will eliminate holes between the records and pointers + * created by the caller. When growing this will create holes to be + * filled in by the caller. + * + * The caller must not request to add more records than would fit in + * the on-disk inode root. If the if_broot is currently NULL, then if + * we are adding records, one will be allocated. The caller must also + * not request that the number of records go below zero, although it + * can go to zero. + */ + struct xfs_btree_block *(*broot_realloc)(struct xfs_btree_cur *cur, + unsigned int new_numrecs); }; /* btree geometry flags */ #define XFS_BTGEO_OVERLAPPING (1U << 0) /* overlapping intervals */ - +#define XFS_BTGEO_IROOT_RECORDS (1U << 1) /* iroot can store records */ union xfs_btree_irec { struct xfs_alloc_rec_incore a; @@ -281,7 +297,7 @@ struct xfs_btree_cur struct { unsigned int nr_ops; /* # record updates */ unsigned int shape_changes; /* # of extent splits */ - } bc_refc; /* refcountbt */ + } bc_refc; /* refcountbt/rtrefcountbt */ }; /* Must be at the end of the struct! 
*/ @@ -687,4 +703,10 @@ xfs_btree_at_iroot( level == cur->bc_nlevels - 1; } +int xfs_btree_alloc_metafile_block(struct xfs_btree_cur *cur, + const union xfs_btree_ptr *start, union xfs_btree_ptr *newp, + int *stat); +int xfs_btree_free_metafile_block(struct xfs_btree_cur *cur, + struct xfs_buf *bp); + #endif /* __XFS_BTREE_H__ */ diff --git a/fs/xfs/libxfs/xfs_btree_mem.c b/fs/xfs/libxfs/xfs_btree_mem.c index df3d613675a1..f2f7b4305413 100644 --- a/fs/xfs/libxfs/xfs_btree_mem.c +++ b/fs/xfs/libxfs/xfs_btree_mem.c @@ -18,6 +18,7 @@ #include "xfs_ag.h" #include "xfs_buf_item.h" #include "xfs_trace.h" +#include "xfs_rtgroup.h" /* Set the root of an in-memory btree. */ void diff --git a/fs/xfs/libxfs/xfs_btree_staging.c b/fs/xfs/libxfs/xfs_btree_staging.c index 694929703152..5ed84f9cc877 100644 --- a/fs/xfs/libxfs/xfs_btree_staging.c +++ b/fs/xfs/libxfs/xfs_btree_staging.c @@ -134,6 +134,7 @@ xfs_btree_stage_ifakeroot( cur->bc_ino.ifake = ifake; cur->bc_nlevels = ifake->if_levels; cur->bc_ino.forksize = ifake->if_fork_size; + cur->bc_ino.whichfork = XFS_STAGING_FORK; cur->bc_flags |= XFS_BTREE_STAGING; } @@ -573,6 +574,7 @@ xfs_btree_bload_compute_geometry( struct xfs_btree_bload *bbl, uint64_t nr_records) { + const struct xfs_btree_ops *ops = cur->bc_ops; uint64_t nr_blocks = 0; uint64_t nr_this_level; @@ -599,7 +601,7 @@ xfs_btree_bload_compute_geometry( xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level, &avg_per_block, &level_blocks, &dontcare64); - if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) { + if (ops->type == XFS_BTREE_TYPE_INODE) { /* * If all the items we want to store at this level * would fit in the inode root block, then we have our @@ -607,7 +609,9 @@ xfs_btree_bload_compute_geometry( * * Note that bmap btrees forbid records in the root. */ - if (level != 0 && nr_this_level <= avg_per_block) { + if ((level != 0 || + (ops->geom_flags & XFS_BTGEO_IROOT_RECORDS)) && + nr_this_level <= avg_per_block) { nr_blocks++; break; } @@ -658,7 +662,7 @@ xfs_btree_bload_compute_geometry( return -EOVERFLOW; bbl->btree_height = cur->bc_nlevels; - if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) + if (ops->type == XFS_BTREE_TYPE_INODE) bbl->nr_blocks = nr_blocks - 1; else bbl->nr_blocks = nr_blocks; diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h index ec51b8465e61..9effd95ddcd4 100644 --- a/fs/xfs/libxfs/xfs_defer.h +++ b/fs/xfs/libxfs/xfs_defer.h @@ -68,7 +68,9 @@ struct xfs_defer_op_type { extern const struct xfs_defer_op_type xfs_bmap_update_defer_type; extern const struct xfs_defer_op_type xfs_refcount_update_defer_type; +extern const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type; extern const struct xfs_defer_op_type xfs_rmap_update_defer_type; +extern const struct xfs_defer_op_type xfs_rtrmap_update_defer_type; extern const struct xfs_defer_op_type xfs_extent_free_defer_type; extern const struct xfs_defer_op_type xfs_agfl_free_defer_type; extern const struct xfs_defer_op_type xfs_rtextent_free_defer_type; diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c index 202468223bf9..1775abcfa04d 100644 --- a/fs/xfs/libxfs/xfs_dir2.c +++ b/fs/xfs/libxfs/xfs_dir2.c @@ -197,7 +197,7 @@ xfs_da_unmount( /* * Return 1 if directory contains only "." and "..". */ -int +static bool xfs_dir_isempty( xfs_inode_t *dp) { @@ -205,9 +205,9 @@ xfs_dir_isempty( ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); if (dp->i_disk_size == 0) /* might happen during shutdown. 
*/ - return 1; + return true; if (dp->i_disk_size > xfs_inode_data_fork_size(dp)) - return 0; + return false; sfp = dp->i_df.if_data; return !sfp->count; } @@ -379,12 +379,11 @@ xfs_dir_cilookup_result( !(args->op_flags & XFS_DA_OP_CILOOKUP)) return -EEXIST; - args->value = kmalloc(len, + args->value = kmemdup(name, len, GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_RETRY_MAYFAIL); if (!args->value) return -ENOMEM; - memcpy(args->value, name, len); args->valuelen = len; return -EEXIST; } diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h index 576068ed81fa..a6594a5a941d 100644 --- a/fs/xfs/libxfs/xfs_dir2.h +++ b/fs/xfs/libxfs/xfs_dir2.h @@ -58,7 +58,6 @@ extern void xfs_dir_startup(void); extern int xfs_da_mount(struct xfs_mount *mp); extern void xfs_da_unmount(struct xfs_mount *mp); -extern int xfs_dir_isempty(struct xfs_inode *dp); extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp, struct xfs_inode *pdp); extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp, diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h index 7002d7676a78..a53c5d40e084 100644 --- a/fs/xfs/libxfs/xfs_errortag.h +++ b/fs/xfs/libxfs/xfs_errortag.h @@ -64,7 +64,8 @@ #define XFS_ERRTAG_WB_DELAY_MS 42 #define XFS_ERRTAG_WRITE_DELAY_MS 43 #define XFS_ERRTAG_EXCHMAPS_FINISH_ONE 44 -#define XFS_ERRTAG_MAX 45 +#define XFS_ERRTAG_METAFILE_RESV_CRITICAL 45 +#define XFS_ERRTAG_MAX 46 /* * Random factors for above tags, 1 means always, 2 means 1/2 time, etc. @@ -113,5 +114,6 @@ #define XFS_RANDOM_WB_DELAY_MS 3000 #define XFS_RANDOM_WRITE_DELAY_MS 3000 #define XFS_RANDOM_EXCHMAPS_FINISH_ONE 1 +#define XFS_RANDOM_METAFILE_RESV_CRITICAL 4 #endif /* __XFS_ERRORTAG_H_ */ diff --git a/fs/xfs/libxfs/xfs_exchmaps.c b/fs/xfs/libxfs/xfs_exchmaps.c index 2021396651de..3f1d6a98c118 100644 --- a/fs/xfs/libxfs/xfs_exchmaps.c +++ b/fs/xfs/libxfs/xfs_exchmaps.c @@ -662,7 +662,9 @@ xfs_exchmaps_rmapbt_blocks( if (!xfs_has_rmapbt(mp)) return 0; if (XFS_IS_REALTIME_INODE(req->ip1)) - return 0; + return howmany_64(req->nr_exchanges, + XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp)) * + XFS_RTRMAPADD_SPACE_RES(mp); return howmany_64(req->nr_exchanges, XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp)) * diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 4d47a3e723aa..b1007fb661ba 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -857,6 +857,8 @@ enum xfs_metafile_type { XFS_METAFILE_PRJQUOTA, /* project quota */ XFS_METAFILE_RTBITMAP, /* rt bitmap */ XFS_METAFILE_RTSUMMARY, /* rt summary */ + XFS_METAFILE_RTRMAP, /* rt rmap */ + XFS_METAFILE_RTREFCOUNT, /* rt refcount */ XFS_METAFILE_MAX } __packed; @@ -868,7 +870,9 @@ enum xfs_metafile_type { { XFS_METAFILE_GRPQUOTA, "grpquota" }, \ { XFS_METAFILE_PRJQUOTA, "prjquota" }, \ { XFS_METAFILE_RTBITMAP, "rtbitmap" }, \ - { XFS_METAFILE_RTSUMMARY, "rtsummary" } + { XFS_METAFILE_RTSUMMARY, "rtsummary" }, \ + { XFS_METAFILE_RTRMAP, "rtrmap" }, \ + { XFS_METAFILE_RTREFCOUNT, "rtrefcount" } /* * On-disk inode structure. 
@@ -997,7 +1001,8 @@ enum xfs_dinode_fmt { XFS_DINODE_FMT_LOCAL, /* bulk data */ XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */ XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */ - XFS_DINODE_FMT_UUID /* added long ago, but never used */ + XFS_DINODE_FMT_UUID, /* added long ago, but never used */ + XFS_DINODE_FMT_META_BTREE, /* metadata btree */ }; #define XFS_INODE_FORMAT_STR \ @@ -1005,7 +1010,8 @@ enum xfs_dinode_fmt { { XFS_DINODE_FMT_LOCAL, "local" }, \ { XFS_DINODE_FMT_EXTENTS, "extent" }, \ { XFS_DINODE_FMT_BTREE, "btree" }, \ - { XFS_DINODE_FMT_UUID, "uuid" } + { XFS_DINODE_FMT_UUID, "uuid" }, \ + { XFS_DINODE_FMT_META_BTREE, "meta_btree" } /* * Max values for extnum and aextnum. @@ -1726,6 +1732,24 @@ typedef __be32 xfs_rmap_ptr_t; XFS_IBT_BLOCK(mp) + 1) /* + * Realtime Reverse mapping btree format definitions + * + * This is a btree for reverse mapping records for realtime volumes + */ +#define XFS_RTRMAP_CRC_MAGIC 0x4d415052 /* 'MAPR' */ + +/* + * rtrmap root header, on-disk form only. + */ +struct xfs_rtrmap_root { + __be16 bb_level; /* 0 is a leaf */ + __be16 bb_numrecs; /* current # of data records */ +}; + +/* inode-based btree pointer type */ +typedef __be64 xfs_rtrmap_ptr_t; + +/* * Reference Count Btree format definitions * */ @@ -1768,12 +1792,29 @@ struct xfs_refcount_key { __be32 rc_startblock; /* starting block number */ }; -#define MAXREFCOUNT ((xfs_nlink_t)~0U) -#define MAXREFCEXTLEN ((xfs_extlen_t)~0U) +#define XFS_REFC_REFCOUNT_MAX ((xfs_nlink_t)~0U) +#define XFS_REFC_LEN_MAX ((xfs_extlen_t)~0U) /* btree pointer type */ typedef __be32 xfs_refcount_ptr_t; +/* + * Realtime Reference Count btree format definitions + * + * This is a btree for reference count records for realtime volumes + */ +#define XFS_RTREFC_CRC_MAGIC 0x52434e54 /* 'RCNT' */ + +/* + * rt refcount root header, on-disk form only. + */ +struct xfs_rtrefcount_root { + __be16 bb_level; /* 0 is a leaf */ + __be16 bb_numrecs; /* current # of data records */ +}; + +/* inode-rooted btree pointer type */ +typedef __be64 xfs_rtrefcount_ptr_t; /* * BMAP Btree format definitions diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h index 41ce4d3d650e..2c3171262b44 100644 --- a/fs/xfs/libxfs/xfs_fs.h +++ b/fs/xfs/libxfs/xfs_fs.h @@ -737,9 +737,11 @@ struct xfs_scrub_metadata { #define XFS_SCRUB_TYPE_DIRTREE 28 /* directory tree structure */ #define XFS_SCRUB_TYPE_METAPATH 29 /* metadata directory tree paths */ #define XFS_SCRUB_TYPE_RGSUPER 30 /* realtime superblock */ +#define XFS_SCRUB_TYPE_RTRMAPBT 31 /* rtgroup reverse mapping btree */ +#define XFS_SCRUB_TYPE_RTREFCBT 32 /* realtime reference count btree */ /* Number of scrub subcommands. */ -#define XFS_SCRUB_TYPE_NR 31 +#define XFS_SCRUB_TYPE_NR 33 /* * This special type code only applies to the vectored scrub implementation. 
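The new xfs_rtrmap_root and xfs_rtrefcount_root headers above are deliberately tiny, two big-endian 16-bit fields each, mirroring the existing inode-rooted bmdr layout; a later hunk adds XFS_CHECK_STRUCT_SIZE entries for them in xfs_ondisk.h. A quick userspace sanity check of the 4-byte layout, with uint16_t standing in for __be16, might look like this:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the on-disk root headers; same size and alignment as __be16. */
struct rtrmap_root {
	uint16_t	bb_level;
	uint16_t	bb_numrecs;
};

struct rtrefcount_root {
	uint16_t	bb_level;
	uint16_t	bb_numrecs;
};

_Static_assert(sizeof(struct rtrmap_root) == 4, "rtrmap root header is 4 bytes");
_Static_assert(sizeof(struct rtrefcount_root) == 4, "rtrefcount root header is 4 bytes");

int main(void)
{
	printf("rtrmap root: %zu bytes, rtrefcount root: %zu bytes\n",
	       sizeof(struct rtrmap_root), sizeof(struct rtrefcount_root));
	return 0;
}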
@@ -829,9 +831,11 @@ struct xfs_scrub_vec_head { #define XFS_SCRUB_METAPATH_USRQUOTA (5) /* user quota */ #define XFS_SCRUB_METAPATH_GRPQUOTA (6) /* group quota */ #define XFS_SCRUB_METAPATH_PRJQUOTA (7) /* project quota */ +#define XFS_SCRUB_METAPATH_RTRMAPBT (8) /* realtime reverse mapping */ +#define XFS_SCRUB_METAPATH_RTREFCOUNTBT (9) /* realtime refcount */ /* Number of metapath sm_ino values */ -#define XFS_SCRUB_METAPATH_NR (8) +#define XFS_SCRUB_METAPATH_NR (10) /* * ioctl limits @@ -993,6 +997,8 @@ struct xfs_rtgroup_geometry { #define XFS_RTGROUP_GEOM_SICK_SUPER (1U << 0) /* superblock */ #define XFS_RTGROUP_GEOM_SICK_BITMAP (1U << 1) /* rtbitmap */ #define XFS_RTGROUP_GEOM_SICK_SUMMARY (1U << 2) /* rtsummary */ +#define XFS_RTGROUP_GEOM_SICK_RMAPBT (1U << 3) /* reverse mappings */ +#define XFS_RTGROUP_GEOM_SICK_REFCNTBT (1U << 4) /* reference counts */ /* * ioctl commands that are used by Linux filesystems diff --git a/fs/xfs/libxfs/xfs_health.h b/fs/xfs/libxfs/xfs_health.h index d34986ac18c3..b31000f7190c 100644 --- a/fs/xfs/libxfs/xfs_health.h +++ b/fs/xfs/libxfs/xfs_health.h @@ -70,6 +70,8 @@ struct xfs_rtgroup; #define XFS_SICK_RG_SUPER (1 << 0) /* rt group superblock */ #define XFS_SICK_RG_BITMAP (1 << 1) /* rt group bitmap */ #define XFS_SICK_RG_SUMMARY (1 << 2) /* rt groups summary */ +#define XFS_SICK_RG_RMAPBT (1 << 3) /* reverse mappings */ +#define XFS_SICK_RG_REFCNTBT (1 << 4) /* reference counts */ /* Observable health issues for AG metadata. */ #define XFS_SICK_AG_SB (1 << 0) /* superblock */ @@ -115,7 +117,9 @@ struct xfs_rtgroup; #define XFS_SICK_RG_PRIMARY (XFS_SICK_RG_SUPER | \ XFS_SICK_RG_BITMAP | \ - XFS_SICK_RG_SUMMARY) + XFS_SICK_RG_SUMMARY | \ + XFS_SICK_RG_RMAPBT | \ + XFS_SICK_RG_REFCNTBT) #define XFS_SICK_AG_PRIMARY (XFS_SICK_AG_SB | \ XFS_SICK_AG_AGF | \ diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 424861fbf1bd..f24fa628fecf 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -441,6 +441,30 @@ xfs_dinode_verify_fork( if (di_nextents > max_extents) return __this_address; break; + case XFS_DINODE_FMT_META_BTREE: + if (!xfs_has_metadir(mp)) + return __this_address; + if (!(dip->di_flags2 & cpu_to_be64(XFS_DIFLAG2_METADATA))) + return __this_address; + switch (be16_to_cpu(dip->di_metatype)) { + case XFS_METAFILE_RTRMAP: + /* + * growfs must create the rtrmap inodes before adding a + * realtime volume to the filesystem, so we cannot use + * the rtrmapbt predicate here. + */ + if (!xfs_has_rmapbt(mp)) + return __this_address; + break; + case XFS_METAFILE_RTREFCOUNT: + /* same comment about growfs and rmap inodes applies */ + if (!xfs_has_reflink(mp)) + return __this_address; + break; + default: + return __this_address; + } + break; default: return __this_address; } @@ -460,6 +484,10 @@ xfs_dinode_verify_forkoff( if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3)) return __this_address; break; + case XFS_DINODE_FMT_META_BTREE: + if (!xfs_has_metadir(mp) || !xfs_has_parent(mp)) + return __this_address; + fallthrough; case XFS_DINODE_FMT_LOCAL: /* fall through ... */ case XFS_DINODE_FMT_EXTENTS: /* fall through ... 
*/ case XFS_DINODE_FMT_BTREE: @@ -637,9 +665,6 @@ xfs_dinode_verify( if (mode && nextents + naextents > nblocks) return __this_address; - if (nextents + naextents == 0 && nblocks != 0) - return __this_address; - if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents) return __this_address; @@ -723,7 +748,8 @@ xfs_dinode_verify( return __this_address; /* don't let reflink and realtime mix */ - if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME)) + if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME) && + !xfs_has_rtreflink(mp)) return __this_address; /* COW extent size hint validation */ @@ -743,6 +769,12 @@ xfs_dinode_verify( return fa; } + /* metadata inodes containing btrees always have zero extent count */ + if (XFS_DFORK_FORMAT(dip, XFS_DATA_FORK) != XFS_DINODE_FMT_META_BTREE) { + if (nextents + naextents == 0 && nblocks != 0) + return __this_address; + } + return NULL; } @@ -878,11 +910,29 @@ xfs_inode_validate_cowextsize( bool rt_flag; bool hint_flag; uint32_t cowextsize_bytes; + uint32_t blocksize_bytes; rt_flag = (flags & XFS_DIFLAG_REALTIME); hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE); cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize); + /* + * Similar to extent size hints, a directory can be configured to + * propagate realtime status and a CoW extent size hint to newly + * created files even if there is no realtime device, and the hints on + * disk can become misaligned if the sysadmin changes the rt extent + * size while adding the realtime device. + * + * Therefore, we can only enforce the rextsize alignment check against + * regular realtime files, and rely on callers to decide when alignment + * checks are appropriate, and fix things up as needed. + */ + + if (rt_flag) + blocksize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize); + else + blocksize_bytes = mp->m_sb.sb_blocksize; + if (hint_flag && !xfs_has_reflink(mp)) return __this_address; @@ -896,16 +946,13 @@ xfs_inode_validate_cowextsize( if (mode && !hint_flag && cowextsize != 0) return __this_address; - if (hint_flag && rt_flag) - return __this_address; - - if (cowextsize_bytes % mp->m_sb.sb_blocksize) + if (cowextsize_bytes % blocksize_bytes) return __this_address; if (cowextsize > XFS_MAX_BMBT_EXTLEN) return __this_address; - if (cowextsize > mp->m_sb.sb_agblocks / 2) + if (!rt_flag && cowextsize > mp->m_sb.sb_agblocks / 2) return __this_address; return NULL; diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 1158ca48626b..4f99b90add55 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -27,6 +27,8 @@ #include "xfs_errortag.h" #include "xfs_health.h" #include "xfs_symlink_remote.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" struct kmem_cache *xfs_ifork_cache; @@ -178,7 +180,7 @@ xfs_iformat_btree( struct xfs_mount *mp = ip->i_mount; xfs_bmdr_block_t *dfp; struct xfs_ifork *ifp; - /* REFERENCED */ + struct xfs_btree_block *broot; int nrecs; int size; int level; @@ -211,16 +213,13 @@ xfs_iformat_btree( return -EFSCORRUPTED; } - ifp->if_broot_bytes = size; - ifp->if_broot = kmalloc(size, - GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); - ASSERT(ifp->if_broot != NULL); + broot = xfs_broot_alloc(ifp, size); /* * Copy and convert from the on-disk structure * to the in-memory structure. 
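Regarding the xfs_inode_validate_cowextsize() hunk above: once reflink is allowed on the realtime device, a realtime file's CoW extent size hint is validated for alignment against the rt extent size rather than the fs block size, and the agblocks/2 cap applies only to non-realtime files. A simplified model of just the alignment decision, with invented names and byte units:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cowextsize_aligned(bool rt_file, uint64_t cowextsize_bytes,
			       uint64_t blocksize_bytes, uint64_t rextsize_bytes)
{
	uint64_t	unit = rt_file ? rextsize_bytes : blocksize_bytes;

	return (cowextsize_bytes % unit) == 0;
}

int main(void)
{
	/* 1 MiB hint, 4k blocks, 256k rt extents: aligned either way */
	printf("%d %d\n",
	       cowextsize_aligned(false, 1048576, 4096, 262144),
	       cowextsize_aligned(true, 1048576, 4096, 262144));
	return 0;
}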
*/ xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), - ifp->if_broot, size); + broot, size); ifp->if_bytes = 0; ifp->if_data = NULL; @@ -270,6 +269,16 @@ xfs_iformat_data_fork( return xfs_iformat_extents(ip, dip, XFS_DATA_FORK); case XFS_DINODE_FMT_BTREE: return xfs_iformat_btree(ip, dip, XFS_DATA_FORK); + case XFS_DINODE_FMT_META_BTREE: + switch (ip->i_metatype) { + case XFS_METAFILE_RTRMAP: + return xfs_iformat_rtrmap(ip, dip); + case XFS_METAFILE_RTREFCOUNT: + return xfs_iformat_rtrefcount(ip, dip); + default: + break; + } + fallthrough; default: xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, sizeof(*dip), __this_address); @@ -363,135 +372,68 @@ xfs_iformat_attr_fork( } /* - * Reallocate the space for if_broot based on the number of records - * being added or deleted as indicated in rec_diff. Move the records - * and pointers in if_broot to fit the new size. When shrinking this - * will eliminate holes between the records and pointers created by - * the caller. When growing this will create holes to be filled in - * by the caller. - * - * The caller must not request to add more records than would fit in - * the on-disk inode root. If the if_broot is currently NULL, then - * if we are adding records, one will be allocated. The caller must also - * not request that the number of records go below zero, although - * it can go to zero. - * - * ip -- the inode whose if_broot area is changing - * ext_diff -- the change in the number of records, positive or negative, - * requested for the if_broot array. + * Allocate the if_broot component of an inode fork so that it is @new_size + * bytes in size, using __GFP_NOLOCKDEP like all the other code that + * initializes a broot during inode load. Returns if_broot. */ -void -xfs_iroot_realloc( - xfs_inode_t *ip, - int rec_diff, - int whichfork) +struct xfs_btree_block * +xfs_broot_alloc( + struct xfs_ifork *ifp, + size_t new_size) { - struct xfs_mount *mp = ip->i_mount; - int cur_max; - struct xfs_ifork *ifp; - struct xfs_btree_block *new_broot; - int new_max; - size_t new_size; - char *np; - char *op; + ASSERT(ifp->if_broot == NULL); - /* - * Handle the degenerate case quietly. - */ - if (rec_diff == 0) { - return; - } + ifp->if_broot = kmalloc(new_size, + GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); + ifp->if_broot_bytes = new_size; + return ifp->if_broot; +} - ifp = xfs_ifork_ptr(ip, whichfork); - if (rec_diff > 0) { - /* - * If there wasn't any memory allocated before, just - * allocate it now and get out. - */ - if (ifp->if_broot_bytes == 0) { - new_size = xfs_bmap_broot_space_calc(mp, rec_diff); - ifp->if_broot = kmalloc(new_size, - GFP_KERNEL | __GFP_NOFAIL); - ifp->if_broot_bytes = (int)new_size; - return; - } +/* + * Reallocate the if_broot component of an inode fork so that it is @new_size + * bytes in size. Returns if_broot. + */ +struct xfs_btree_block * +xfs_broot_realloc( + struct xfs_ifork *ifp, + size_t new_size) +{ + /* No size change? No action needed. */ + if (new_size == ifp->if_broot_bytes) + return ifp->if_broot; - /* - * If there is already an existing if_broot, then we need - * to realloc() it and shift the pointers to their new - * location. The records don't change location because - * they are kept butted up against the btree block header. 
- */ - cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false); - new_max = cur_max + rec_diff; - new_size = xfs_bmap_broot_space_calc(mp, new_max); - ifp->if_broot = krealloc(ifp->if_broot, new_size, - GFP_KERNEL | __GFP_NOFAIL); - op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1, - ifp->if_broot_bytes); - np = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1, - (int)new_size); - ifp->if_broot_bytes = (int)new_size; - ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <= - xfs_inode_fork_size(ip, whichfork)); - memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t)); - return; + /* New size is zero, free it. */ + if (new_size == 0) { + ifp->if_broot_bytes = 0; + kfree(ifp->if_broot); + ifp->if_broot = NULL; + return NULL; } /* - * rec_diff is less than 0. In this case, we are shrinking the - * if_broot buffer. It must already exist. If we go to zero - * records, just get rid of the root and clear the status bit. + * Shrinking the iroot means we allocate a new smaller object and copy + * it. We don't trust krealloc not to nop on realloc-down. */ - ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); - cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, false); - new_max = cur_max + rec_diff; - ASSERT(new_max >= 0); - if (new_max > 0) - new_size = xfs_bmap_broot_space_calc(mp, new_max); - else - new_size = 0; - if (new_size > 0) { - new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL); - /* - * First copy over the btree block header. - */ - memcpy(new_broot, ifp->if_broot, - xfs_bmbt_block_len(ip->i_mount)); - } else { - new_broot = NULL; + if (ifp->if_broot_bytes > 0 && ifp->if_broot_bytes > new_size) { + struct xfs_btree_block *old_broot = ifp->if_broot; + + ifp->if_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL); + ifp->if_broot_bytes = new_size; + memcpy(ifp->if_broot, old_broot, new_size); + kfree(old_broot); + return ifp->if_broot; } /* - * Only copy the keys and pointers if there are any. + * Growing the iroot means we can krealloc. This may get us the same + * object. */ - if (new_max > 0) { - /* - * First copy the keys. - */ - op = (char *)xfs_bmbt_key_addr(mp, ifp->if_broot, 1); - np = (char *)xfs_bmbt_key_addr(mp, new_broot, 1); - memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_key_t)); - - /* - * Then copy the pointers. - */ - op = (char *)xfs_bmap_broot_ptr_addr(mp, ifp->if_broot, 1, - ifp->if_broot_bytes); - np = (char *)xfs_bmap_broot_ptr_addr(mp, new_broot, 1, - (int)new_size); - memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t)); - } - kfree(ifp->if_broot); - ifp->if_broot = new_broot; - ifp->if_broot_bytes = (int)new_size; - if (ifp->if_broot) - ASSERT(xfs_bmap_bmdr_space(ifp->if_broot) <= - xfs_inode_fork_size(ip, whichfork)); - return; + ifp->if_broot = krealloc(ifp->if_broot, new_size, + GFP_KERNEL | __GFP_NOFAIL); + ifp->if_broot_bytes = new_size; + return ifp->if_broot; } - /* * This is called when the amount of space needed for if_data * is increased or decreased. 
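The replacement of xfs_iroot_realloc() above splits the bmbt-specific record and pointer shuffling (now in xfs_bmap_broot_realloc) from plain buffer management in xfs_broot_realloc(). The buffer policy is small enough to model in userspace: grow with realloc, but shrink by allocating a smaller buffer and copying, since realloc is not trusted to actually return memory on a downward resize. The sketch below uses invented names and plain malloc/realloc; error handling is elided because the kernel path allocates with __GFP_NOFAIL.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fork {
	void	*broot;
	size_t	broot_bytes;
};

static void *broot_realloc(struct fork *ifp, size_t new_size)
{
	if (new_size == ifp->broot_bytes)
		return ifp->broot;

	if (new_size == 0) {
		free(ifp->broot);
		ifp->broot = NULL;
		ifp->broot_bytes = 0;
		return NULL;
	}

	if (ifp->broot_bytes > 0 && new_size < ifp->broot_bytes) {
		/* shrink: fresh allocation plus copy of the surviving bytes */
		void *nb = malloc(new_size);

		memcpy(nb, ifp->broot, new_size);
		free(ifp->broot);
		ifp->broot = nb;
	} else {
		/* grow (or first allocation): realloc is fine */
		ifp->broot = realloc(ifp->broot, new_size);
	}
	ifp->broot_bytes = new_size;
	return ifp->broot;
}

int main(void)
{
	struct fork	f = { 0 };

	memset(broot_realloc(&f, 128), 0, 128);	/* first allocation */
	broot_realloc(&f, 64);			/* shrink */
	broot_realloc(&f, 0);			/* free */
	printf("bytes now %zu\n", f.broot_bytes);
	return 0;
}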
The change in size is indicated by @@ -671,6 +613,25 @@ xfs_iflush_fork( } break; + case XFS_DINODE_FMT_META_BTREE: + ASSERT(whichfork == XFS_DATA_FORK); + + if (!(iip->ili_fields & brootflag[whichfork])) + break; + + switch (ip->i_metatype) { + case XFS_METAFILE_RTRMAP: + xfs_iflush_rtrmap(ip, dip); + break; + case XFS_METAFILE_RTREFCOUNT: + xfs_iflush_rtrefcount(ip, dip); + break; + default: + ASSERT(0); + break; + } + break; + default: ASSERT(0); break; diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 2373d12fd474..69ed0919d60b 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h @@ -170,7 +170,11 @@ void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, void xfs_idestroy_fork(struct xfs_ifork *ifp); void * xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff, int whichfork); -void xfs_iroot_realloc(struct xfs_inode *, int, int); +struct xfs_btree_block *xfs_broot_alloc(struct xfs_ifork *ifp, + size_t new_size); +struct xfs_btree_block *xfs_broot_realloc(struct xfs_ifork *ifp, + size_t new_size); + int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int); int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *, int); diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 15dec19b6c32..a472ac2e45d0 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h @@ -250,6 +250,10 @@ typedef struct xfs_trans_header { #define XFS_LI_XMD 0x1249 /* mapping exchange done */ #define XFS_LI_EFI_RT 0x124a /* realtime extent free intent */ #define XFS_LI_EFD_RT 0x124b /* realtime extent free done */ +#define XFS_LI_RUI_RT 0x124c /* realtime rmap update intent */ +#define XFS_LI_RUD_RT 0x124d /* realtime rmap update done */ +#define XFS_LI_CUI_RT 0x124e /* realtime refcount update intent */ +#define XFS_LI_CUD_RT 0x124f /* realtime refcount update done */ #define XFS_LI_TYPE_DESC \ { XFS_LI_EFI, "XFS_LI_EFI" }, \ @@ -271,7 +275,11 @@ typedef struct xfs_trans_header { { XFS_LI_XMI, "XFS_LI_XMI" }, \ { XFS_LI_XMD, "XFS_LI_XMD" }, \ { XFS_LI_EFI_RT, "XFS_LI_EFI_RT" }, \ - { XFS_LI_EFD_RT, "XFS_LI_EFD_RT" } + { XFS_LI_EFD_RT, "XFS_LI_EFD_RT" }, \ + { XFS_LI_RUI_RT, "XFS_LI_RUI_RT" }, \ + { XFS_LI_RUD_RT, "XFS_LI_RUD_RT" }, \ + { XFS_LI_CUI_RT, "XFS_LI_CUI_RT" }, \ + { XFS_LI_CUD_RT, "XFS_LI_CUD_RT" } /* * Inode Log Item Format definitions. @@ -351,12 +359,6 @@ struct xfs_inode_log_format_32 { */ #define XFS_ILOG_IVERSION 0x8000 -#define XFS_ILOG_NONCORE (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ - XFS_ILOG_DBROOT | XFS_ILOG_DEV | \ - XFS_ILOG_ADATA | XFS_ILOG_AEXT | \ - XFS_ILOG_ABROOT | XFS_ILOG_DOWNER | \ - XFS_ILOG_AOWNER) - #define XFS_ILOG_DFORK (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \ XFS_ILOG_DBROOT) diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h index 5397a8ff004d..66c7916fb5cd 100644 --- a/fs/xfs/libxfs/xfs_log_recover.h +++ b/fs/xfs/libxfs/xfs_log_recover.h @@ -79,6 +79,10 @@ extern const struct xlog_recover_item_ops xlog_xmi_item_ops; extern const struct xlog_recover_item_ops xlog_xmd_item_ops; extern const struct xlog_recover_item_ops xlog_rtefi_item_ops; extern const struct xlog_recover_item_ops xlog_rtefd_item_ops; +extern const struct xlog_recover_item_ops xlog_rtrui_item_ops; +extern const struct xlog_recover_item_ops xlog_rtrud_item_ops; +extern const struct xlog_recover_item_ops xlog_rtcui_item_ops; +extern const struct xlog_recover_item_ops xlog_rtcud_item_ops; /* * Macros, structures, prototypes for internal log manager use. 
diff --git a/fs/xfs/libxfs/xfs_metadir.c b/fs/xfs/libxfs/xfs_metadir.c index bae7377c0f22..178e89711cb7 100644 --- a/fs/xfs/libxfs/xfs_metadir.c +++ b/fs/xfs/libxfs/xfs_metadir.c @@ -29,6 +29,10 @@ #include "xfs_dir2_priv.h" #include "xfs_parent.h" #include "xfs_health.h" +#include "xfs_errortag.h" +#include "xfs_error.h" +#include "xfs_btree.h" +#include "xfs_alloc.h" /* * Metadata Directory Tree diff --git a/fs/xfs/libxfs/xfs_metafile.c b/fs/xfs/libxfs/xfs_metafile.c index adeb25d1a444..2f5f554a36d4 100644 --- a/fs/xfs/libxfs/xfs_metafile.c +++ b/fs/xfs/libxfs/xfs_metafile.c @@ -17,6 +17,28 @@ #include "xfs_metafile.h" #include "xfs_trace.h" #include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_errortag.h" +#include "xfs_error.h" +#include "xfs_alloc.h" + +static const struct { + enum xfs_metafile_type mtype; + const char *name; +} xfs_metafile_type_strs[] = { XFS_METAFILE_TYPE_STR }; + +const char * +xfs_metafile_type_str(enum xfs_metafile_type metatype) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(xfs_metafile_type_strs); i++) { + if (xfs_metafile_type_strs[i].mtype == metatype) + return xfs_metafile_type_strs[i].name; + } + + return NULL; +} /* Set up an inode to be recognized as a metadata directory inode. */ void @@ -50,3 +72,204 @@ xfs_metafile_clear_iflag( ip->i_diflags2 &= ~XFS_DIFLAG2_METADATA; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } + +/* + * Is the amount of space that could be allocated towards a given metadata + * file at or beneath a certain threshold? + */ +static inline bool +xfs_metafile_resv_can_cover( + struct xfs_inode *ip, + int64_t rhs) +{ + /* + * The amount of space that can be allocated to this metadata file is + * the remaining reservation for the particular metadata file + the + * global free block count. Take care of the first case to avoid + * touching the per-cpu counter. + */ + if (ip->i_delayed_blks >= rhs) + return true; + + /* + * There aren't enough blocks left in the inode's reservation, but it + * isn't critical unless there also isn't enough free space. + */ + return __percpu_counter_compare(&ip->i_mount->m_fdblocks, + rhs - ip->i_delayed_blks, 2048) >= 0; +} + +/* + * Is this metadata file critically low on blocks? For now we'll define that + * as the number of blocks we can get our hands on being less than 10% of what + * we reserved or less than some arbitrary number (maximum btree height). + */ +bool +xfs_metafile_resv_critical( + struct xfs_inode *ip) +{ + uint64_t asked_low_water; + + if (!ip) + return false; + + ASSERT(xfs_is_metadir_inode(ip)); + trace_xfs_metafile_resv_critical(ip, 0); + + if (!xfs_metafile_resv_can_cover(ip, ip->i_mount->m_rtbtree_maxlevels)) + return true; + + asked_low_water = div_u64(ip->i_meta_resv_asked, 10); + if (!xfs_metafile_resv_can_cover(ip, asked_low_water)) + return true; + + return XFS_TEST_ERROR(false, ip->i_mount, + XFS_ERRTAG_METAFILE_RESV_CRITICAL); +} + +/* Allocate a block from the metadata file's reservation. */ +void +xfs_metafile_resv_alloc_space( + struct xfs_inode *ip, + struct xfs_alloc_arg *args) +{ + int64_t len = args->len; + + ASSERT(xfs_is_metadir_inode(ip)); + ASSERT(args->resv == XFS_AG_RESV_METAFILE); + + trace_xfs_metafile_resv_alloc_space(ip, args->len); + + /* + * Allocate the blocks from the metadata inode's block reservation + * and update the ondisk sb counter. 
+ */ + if (ip->i_delayed_blks > 0) { + int64_t from_resv; + + from_resv = min_t(int64_t, len, ip->i_delayed_blks); + ip->i_delayed_blks -= from_resv; + xfs_mod_delalloc(ip, 0, -from_resv); + xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS, + -from_resv); + len -= from_resv; + } + + /* + * Any allocation in excess of the reservation requires in-core and + * on-disk fdblocks updates. If we can grab @len blocks from the + * in-core fdblocks then all we need to do is update the on-disk + * superblock; if not, then try to steal some from the transaction's + * block reservation. Overruns are only expected for rmap btrees. + */ + if (len) { + unsigned int field; + int error; + + error = xfs_dec_fdblocks(ip->i_mount, len, true); + if (error) + field = XFS_TRANS_SB_FDBLOCKS; + else + field = XFS_TRANS_SB_RES_FDBLOCKS; + + xfs_trans_mod_sb(args->tp, field, -len); + } + + ip->i_nblocks += args->len; + xfs_trans_log_inode(args->tp, ip, XFS_ILOG_CORE); +} + +/* Free a block to the metadata file's reservation. */ +void +xfs_metafile_resv_free_space( + struct xfs_inode *ip, + struct xfs_trans *tp, + xfs_filblks_t len) +{ + int64_t to_resv; + + ASSERT(xfs_is_metadir_inode(ip)); + trace_xfs_metafile_resv_free_space(ip, len); + + ip->i_nblocks -= len; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + /* + * Add the freed blocks back into the inode's delalloc reservation + * until it reaches the maximum size. Update the ondisk fdblocks only. + */ + to_resv = ip->i_meta_resv_asked - (ip->i_nblocks + ip->i_delayed_blks); + if (to_resv > 0) { + to_resv = min_t(int64_t, to_resv, len); + ip->i_delayed_blks += to_resv; + xfs_mod_delalloc(ip, 0, to_resv); + xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, to_resv); + len -= to_resv; + } + + /* + * Everything else goes back to the filesystem, so update the in-core + * and on-disk counters. + */ + if (len) + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len); +} + +/* Release a metadata file's space reservation. */ +void +xfs_metafile_resv_free( + struct xfs_inode *ip) +{ + /* Non-btree metadata inodes don't need space reservations. */ + if (!ip || !ip->i_meta_resv_asked) + return; + + ASSERT(xfs_is_metadir_inode(ip)); + trace_xfs_metafile_resv_free(ip, 0); + + if (ip->i_delayed_blks) { + xfs_mod_delalloc(ip, 0, -ip->i_delayed_blks); + xfs_add_fdblocks(ip->i_mount, ip->i_delayed_blks); + ip->i_delayed_blks = 0; + } + ip->i_meta_resv_asked = 0; +} + +/* Set up a metadata file's space reservation. */ +int +xfs_metafile_resv_init( + struct xfs_inode *ip, + xfs_filblks_t ask) +{ + xfs_filblks_t hidden_space; + xfs_filblks_t used; + int error; + + if (!ip || ip->i_meta_resv_asked > 0) + return 0; + + ASSERT(xfs_is_metadir_inode(ip)); + + /* + * Space taken by all other metadata btrees are accounted on-disk as + * used space. We therefore only hide the space that is reserved but + * not used by the trees. 
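xfs_metafile_resv_critical() above flags a metadata file as short on space when neither its remaining delalloc reservation nor the global free-block pool can cover a worst-case btree expansion, or when less than a tenth of the original ask remains coverable. A rough userspace model of that decision, ignoring the per-cpu counter batching and the error-injection tag, and using invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct metafile {
	uint64_t	resv_remaining;	/* like ip->i_delayed_blks */
	uint64_t	resv_asked;	/* like ip->i_meta_resv_asked */
};

static bool can_cover(const struct metafile *mf, uint64_t fdblocks,
		      uint64_t need)
{
	if (mf->resv_remaining >= need)
		return true;
	return fdblocks >= need - mf->resv_remaining;
}

static bool resv_critical(const struct metafile *mf, uint64_t fdblocks,
			  uint64_t max_btree_height)
{
	if (!can_cover(mf, fdblocks, max_btree_height))
		return true;
	return !can_cover(mf, fdblocks, mf->resv_asked / 10);
}

int main(void)
{
	struct metafile	mf = { .resv_remaining = 3, .resv_asked = 1000 };

	/* reservation 3 + free 50 cannot cover the low water mark of 100 */
	printf("critical: %d\n", resv_critical(&mf, 50, 9));
	return 0;
}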
+ */ + used = ip->i_nblocks; + if (used > ask) + ask = used; + hidden_space = ask - used; + + error = xfs_dec_fdblocks(ip->i_mount, hidden_space, true); + if (error) { + trace_xfs_metafile_resv_init_error(ip, error, _RET_IP_); + return error; + } + + xfs_mod_delalloc(ip, 0, hidden_space); + ip->i_delayed_blks = hidden_space; + ip->i_meta_resv_asked = ask; + + trace_xfs_metafile_resv_init(ip, ask); + return 0; +} diff --git a/fs/xfs/libxfs/xfs_metafile.h b/fs/xfs/libxfs/xfs_metafile.h index acec400123db..95af4b52e5a7 100644 --- a/fs/xfs/libxfs/xfs_metafile.h +++ b/fs/xfs/libxfs/xfs_metafile.h @@ -6,6 +6,8 @@ #ifndef __XFS_METAFILE_H__ #define __XFS_METAFILE_H__ +const char *xfs_metafile_type_str(enum xfs_metafile_type metatype); + /* All metadata files must have these flags set. */ #define XFS_METAFILE_DIFLAGS (XFS_DIFLAG_IMMUTABLE | \ XFS_DIFLAG_SYNC | \ @@ -21,6 +23,17 @@ void xfs_metafile_set_iflag(struct xfs_trans *tp, struct xfs_inode *ip, enum xfs_metafile_type metafile_type); void xfs_metafile_clear_iflag(struct xfs_trans *tp, struct xfs_inode *ip); +/* Space reservations for metadata inodes. */ +struct xfs_alloc_arg; + +bool xfs_metafile_resv_critical(struct xfs_inode *ip); +void xfs_metafile_resv_alloc_space(struct xfs_inode *ip, + struct xfs_alloc_arg *args); +void xfs_metafile_resv_free_space(struct xfs_inode *ip, struct xfs_trans *tp, + xfs_filblks_t len); +void xfs_metafile_resv_free(struct xfs_inode *ip); +int xfs_metafile_resv_init(struct xfs_inode *ip, xfs_filblks_t ask); + /* Code specific to kernel/userspace; must be provided externally. */ int xfs_trans_metafile_iget(struct xfs_trans *tp, xfs_ino_t ino, diff --git a/fs/xfs/libxfs/xfs_ondisk.h b/fs/xfs/libxfs/xfs_ondisk.h index ad0dedf00f18..a85ecddaa48e 100644 --- a/fs/xfs/libxfs/xfs_ondisk.h +++ b/fs/xfs/libxfs/xfs_ondisk.h @@ -83,6 +83,10 @@ xfs_check_ondisk_structs(void) XFS_CHECK_STRUCT_SIZE(union xfs_rtword_raw, 4); XFS_CHECK_STRUCT_SIZE(union xfs_suminfo_raw, 4); XFS_CHECK_STRUCT_SIZE(struct xfs_rtbuf_blkinfo, 48); + XFS_CHECK_STRUCT_SIZE(xfs_rtrmap_ptr_t, 8); + XFS_CHECK_STRUCT_SIZE(struct xfs_rtrmap_root, 4); + XFS_CHECK_STRUCT_SIZE(xfs_rtrefcount_ptr_t, 8); + XFS_CHECK_STRUCT_SIZE(struct xfs_rtrefcount_root, 4); /* * m68k has problems with struct xfs_attr_leaf_name_remote, but we pad diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index 2dbab68b4fe6..cebe83f7842a 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -25,6 +25,9 @@ #include "xfs_ag.h" #include "xfs_health.h" #include "xfs_refcount_item.h" +#include "xfs_rtgroup.h" +#include "xfs_rtalloc.h" +#include "xfs_rtrefcount_btree.h" struct kmem_cache *xfs_refcount_intent_cache; @@ -128,7 +131,7 @@ xfs_refcount_check_irec( struct xfs_perag *pag, const struct xfs_refcount_irec *irec) { - if (irec->rc_blockcount == 0 || irec->rc_blockcount > MAXREFCEXTLEN) + if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX) return __this_address; if (!xfs_refcount_check_domain(irec)) @@ -138,12 +141,43 @@ xfs_refcount_check_irec( if (!xfs_verify_agbext(pag, irec->rc_startblock, irec->rc_blockcount)) return __this_address; - if (irec->rc_refcount == 0 || irec->rc_refcount > MAXREFCOUNT) + if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX) return __this_address; return NULL; } +xfs_failaddr_t +xfs_rtrefcount_check_irec( + struct xfs_rtgroup *rtg, + const struct xfs_refcount_irec *irec) +{ + if (irec->rc_blockcount == 0 || irec->rc_blockcount > XFS_REFC_LEN_MAX) + return 
__this_address; + + if (!xfs_refcount_check_domain(irec)) + return __this_address; + + /* check for valid extent range, including overflow */ + if (!xfs_verify_rgbext(rtg, irec->rc_startblock, irec->rc_blockcount)) + return __this_address; + + if (irec->rc_refcount == 0 || irec->rc_refcount > XFS_REFC_REFCOUNT_MAX) + return __this_address; + + return NULL; +} + +static inline xfs_failaddr_t +xfs_refcount_check_btrec( + struct xfs_btree_cur *cur, + const struct xfs_refcount_irec *irec) +{ + if (xfs_btree_is_rtrefcount(cur->bc_ops)) + return xfs_rtrefcount_check_irec(to_rtg(cur->bc_group), irec); + return xfs_refcount_check_irec(to_perag(cur->bc_group), irec); +} + static inline int xfs_refcount_complain_bad_rec( struct xfs_btree_cur *cur, @@ -152,9 +186,15 @@ xfs_refcount_complain_bad_rec( { struct xfs_mount *mp = cur->bc_mp; - xfs_warn(mp, + if (xfs_btree_is_rtrefcount(cur->bc_ops)) { + xfs_warn(mp, + "RT Refcount BTree record corruption in rtgroup %u detected at %pS!", + cur->bc_group->xg_gno, fa); + } else { + xfs_warn(mp, "Refcount BTree record corruption in AG %d detected at %pS!", cur->bc_group->xg_gno, fa); + } xfs_warn(mp, "Start block 0x%x, block count 0x%x, references 0x%x", irec->rc_startblock, irec->rc_blockcount, irec->rc_refcount); @@ -180,7 +220,7 @@ xfs_refcount_get_rec( return error; xfs_refcount_btrec_to_irec(rec, irec); - fa = xfs_refcount_check_irec(to_perag(cur->bc_group), irec); + fa = xfs_refcount_check_btrec(cur, irec); if (fa) return xfs_refcount_complain_bad_rec(cur, fa, irec); @@ -853,9 +893,9 @@ xfs_refc_merge_refcount( const struct xfs_refcount_irec *irec, enum xfs_refc_adjust_op adjust) { - /* Once a record hits MAXREFCOUNT, it is pinned there forever */ - if (irec->rc_refcount == MAXREFCOUNT) - return MAXREFCOUNT; + /* Once a record hits XFS_REFC_REFCOUNT_MAX, it is pinned forever */ + if (irec->rc_refcount == XFS_REFC_REFCOUNT_MAX) + return XFS_REFC_REFCOUNT_MAX; return irec->rc_refcount + adjust; } @@ -898,7 +938,7 @@ xfs_refc_want_merge_center( * hence we need to catch u32 addition overflows here. */ ulen += cleft->rc_blockcount + right->rc_blockcount; - if (ulen >= MAXREFCEXTLEN) + if (ulen >= XFS_REFC_LEN_MAX) return false; *ulenp = ulen; @@ -933,7 +973,7 @@ xfs_refc_want_merge_left( * hence we need to catch u32 addition overflows here. */ ulen += cleft->rc_blockcount; - if (ulen >= MAXREFCEXTLEN) + if (ulen >= XFS_REFC_LEN_MAX) return false; return true; @@ -967,7 +1007,7 @@ xfs_refc_want_merge_right( * hence we need to catch u32 addition overflows here. */ ulen += cright->rc_blockcount; - if (ulen >= MAXREFCEXTLEN) + if (ulen >= XFS_REFC_LEN_MAX) return false; return true; @@ -1065,7 +1105,7 @@ xfs_refcount_still_have_space( */ overhead = xfs_allocfree_block_count(cur->bc_mp, cur->bc_refc.shape_changes); - overhead += cur->bc_mp->m_refc_maxlevels; + overhead += cur->bc_maxlevels; overhead *= cur->bc_mp->m_sb.sb_blocksize; /* @@ -1085,6 +1125,22 @@ xfs_refcount_still_have_space( cur->bc_refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD; } +/* Schedule an extent free. */ +static int +xrefc_free_extent( + struct xfs_btree_cur *cur, + struct xfs_refcount_irec *rec) +{ + unsigned int flags = 0; + + if (xfs_btree_is_rtrefcount(cur->bc_ops)) + flags |= XFS_FREE_EXTENT_REALTIME; + + return xfs_free_extent_later(cur->bc_tp, + xfs_gbno_to_fsb(cur->bc_group, rec->rc_startblock), + rec->rc_blockcount, NULL, XFS_AG_RESV_NONE, flags); +} + /* * Adjust the refcounts of middle extents. 
At this point we should have * split extents that crossed the adjustment range; merged with adjacent @@ -1101,7 +1157,6 @@ xfs_refcount_adjust_extents( struct xfs_refcount_irec ext, tmp; int error; int found_rec, found_tmp; - xfs_fsblock_t fsbno; /* Merging did all the work already. */ if (*aglen == 0) @@ -1117,7 +1172,7 @@ xfs_refcount_adjust_extents( if (error) goto out_error; if (!found_rec || ext.rc_domain != XFS_REFC_DOMAIN_SHARED) { - ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks; + ext.rc_startblock = xfs_group_max_blocks(cur->bc_group); ext.rc_blockcount = 0; ext.rc_refcount = 0; ext.rc_domain = XFS_REFC_DOMAIN_SHARED; @@ -1154,11 +1209,7 @@ xfs_refcount_adjust_extents( goto out_error; } } else { - fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), - tmp.rc_startblock); - error = xfs_free_extent_later(cur->bc_tp, fsbno, - tmp.rc_blockcount, NULL, - XFS_AG_RESV_NONE, 0); + error = xrefc_free_extent(cur, &tmp); if (error) goto out_error; } @@ -1196,7 +1247,7 @@ xfs_refcount_adjust_extents( * Adjust the reference count and either update the tree * (incr) or free the blocks (decr). */ - if (ext.rc_refcount == MAXREFCOUNT) + if (ext.rc_refcount == XFS_REFC_REFCOUNT_MAX) goto skip; ext.rc_refcount += adj; trace_xfs_refcount_modify_extent(cur, &ext); @@ -1216,11 +1267,7 @@ xfs_refcount_adjust_extents( } goto advloop; } else { - fsbno = xfs_agbno_to_fsb(to_perag(cur->bc_group), - ext.rc_startblock); - error = xfs_free_extent_later(cur->bc_tp, fsbno, - ext.rc_blockcount, NULL, - XFS_AG_RESV_NONE, 0); + error = xrefc_free_extent(cur, &ext); if (error) goto out_error; } @@ -1417,12 +1464,122 @@ xfs_refcount_finish_one( } /* + * Set up a continuation a deferred rtrefcount operation by updating the + * intent. Checks to make sure we're not going to run off the end of the + * rtgroup. + */ +static inline int +xfs_rtrefcount_continue_op( + struct xfs_btree_cur *cur, + struct xfs_refcount_intent *ri, + xfs_agblock_t new_agbno) +{ + struct xfs_mount *mp = cur->bc_mp; + struct xfs_rtgroup *rtg = to_rtg(ri->ri_group); + + if (XFS_IS_CORRUPT(mp, !xfs_verify_rgbext(rtg, new_agbno, + ri->ri_blockcount))) { + xfs_btree_mark_sick(cur); + return -EFSCORRUPTED; + } + + ri->ri_startblock = xfs_rgbno_to_rtb(rtg, new_agbno); + + ASSERT(xfs_verify_rtbext(mp, ri->ri_startblock, ri->ri_blockcount)); + return 0; +} + +/* + * Process one of the deferred realtime refcount operations. We pass back the + * btree cursor to maintain our lock on the btree between calls. + */ +int +xfs_rtrefcount_finish_one( + struct xfs_trans *tp, + struct xfs_refcount_intent *ri, + struct xfs_btree_cur **pcur) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_rtgroup *rtg = to_rtg(ri->ri_group); + struct xfs_btree_cur *rcur = *pcur; + int error = 0; + xfs_rgblock_t bno; + unsigned long nr_ops = 0; + int shape_changes = 0; + + bno = xfs_rtb_to_rgbno(mp, ri->ri_startblock); + + trace_xfs_refcount_deferred(mp, ri); + + if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) + return -EIO; + + /* + * If we haven't gotten a cursor or the cursor AG doesn't match + * the startblock, get one now. 
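+	 * (Illustrative note, not part of the patch: for this realtime flavour
+	 * the comparison is against the rtgroup held in ri->ri_group rather
+	 * than an AG, and nr_ops/shape_changes are carried over from the old
+	 * cursor so the transaction-space accounting in
+	 * xfs_refcount_still_have_space() keeps working across the switch.)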
+ */ + if (rcur != NULL && rcur->bc_group != ri->ri_group) { + nr_ops = rcur->bc_refc.nr_ops; + shape_changes = rcur->bc_refc.shape_changes; + xfs_btree_del_cursor(rcur, 0); + rcur = NULL; + *pcur = NULL; + } + if (rcur == NULL) { + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT); + xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_REFCOUNT); + *pcur = rcur = xfs_rtrefcountbt_init_cursor(tp, rtg); + + rcur->bc_refc.nr_ops = nr_ops; + rcur->bc_refc.shape_changes = shape_changes; + } + + switch (ri->ri_type) { + case XFS_REFCOUNT_INCREASE: + error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount, + XFS_REFCOUNT_ADJUST_INCREASE); + if (error) + return error; + if (ri->ri_blockcount > 0) + error = xfs_rtrefcount_continue_op(rcur, ri, bno); + break; + case XFS_REFCOUNT_DECREASE: + error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount, + XFS_REFCOUNT_ADJUST_DECREASE); + if (error) + return error; + if (ri->ri_blockcount > 0) + error = xfs_rtrefcount_continue_op(rcur, ri, bno); + break; + case XFS_REFCOUNT_ALLOC_COW: + error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount); + if (error) + return error; + ri->ri_blockcount = 0; + break; + case XFS_REFCOUNT_FREE_COW: + error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount); + if (error) + return error; + ri->ri_blockcount = 0; + break; + default: + ASSERT(0); + return -EFSCORRUPTED; + } + if (!error && ri->ri_blockcount > 0) + trace_xfs_refcount_finish_one_leftover(mp, ri); + return error; +} + +/* * Record a refcount intent for later processing. */ static void __xfs_refcount_add( struct xfs_trans *tp, enum xfs_refcount_intent_type type, + bool isrt, xfs_fsblock_t startblock, xfs_extlen_t blockcount) { @@ -1434,6 +1591,7 @@ __xfs_refcount_add( ri->ri_type = type; ri->ri_startblock = startblock; ri->ri_blockcount = blockcount; + ri->ri_realtime = isrt; xfs_refcount_defer_add(tp, ri); } @@ -1444,12 +1602,13 @@ __xfs_refcount_add( void xfs_refcount_increase_extent( struct xfs_trans *tp, + bool isrt, struct xfs_bmbt_irec *PREV) { if (!xfs_has_reflink(tp->t_mountp)) return; - __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, PREV->br_startblock, + __xfs_refcount_add(tp, XFS_REFCOUNT_INCREASE, isrt, PREV->br_startblock, PREV->br_blockcount); } @@ -1459,12 +1618,13 @@ xfs_refcount_increase_extent( void xfs_refcount_decrease_extent( struct xfs_trans *tp, + bool isrt, struct xfs_bmbt_irec *PREV) { if (!xfs_has_reflink(tp->t_mountp)) return; - __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, PREV->br_startblock, + __xfs_refcount_add(tp, XFS_REFCOUNT_DECREASE, isrt, PREV->br_startblock, PREV->br_blockcount); } @@ -1666,7 +1826,7 @@ xfs_refcount_adjust_cow_extents( goto out_error; } if (!found_rec) { - ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks; + ext.rc_startblock = xfs_group_max_blocks(cur->bc_group); ext.rc_blockcount = 0; ext.rc_refcount = 0; ext.rc_domain = XFS_REFC_DOMAIN_COW; @@ -1820,6 +1980,7 @@ __xfs_refcount_cow_free( void xfs_refcount_alloc_cow_extent( struct xfs_trans *tp, + bool isrt, xfs_fsblock_t fsb, xfs_extlen_t len) { @@ -1828,17 +1989,17 @@ xfs_refcount_alloc_cow_extent( if (!xfs_has_reflink(mp)) return; - __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, fsb, len); + __xfs_refcount_add(tp, XFS_REFCOUNT_ALLOC_COW, isrt, fsb, len); /* Add rmap entry */ - xfs_rmap_alloc_extent(tp, XFS_FSB_TO_AGNO(mp, fsb), - XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); + xfs_rmap_alloc_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW); } /* Forget a CoW staging event in the refcount btree. 
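 * (Illustrative sketch, not part of the patch: with the new isrt
 * argument a realtime caller stages and later forgets CoW blocks as
 *	xfs_refcount_alloc_cow_extent(tp, true, fsb, len);
 *	...
 *	xfs_refcount_free_cow_extent(tp, true, fsb, len);
 * and the matching XFS_RMAP_OWN_COW rmap record is added or removed
 * via xfs_rmap_alloc_extent()/xfs_rmap_free_extent().)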
*/ void xfs_refcount_free_cow_extent( struct xfs_trans *tp, + bool isrt, xfs_fsblock_t fsb, xfs_extlen_t len) { @@ -1848,9 +2009,8 @@ xfs_refcount_free_cow_extent( return; /* Remove rmap entry */ - xfs_rmap_free_extent(tp, XFS_FSB_TO_AGNO(mp, fsb), - XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW); - __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, fsb, len); + xfs_rmap_free_extent(tp, isrt, fsb, len, XFS_RMAP_OWN_COW); + __xfs_refcount_add(tp, XFS_REFCOUNT_FREE_COW, isrt, fsb, len); } struct xfs_refcount_recovery { @@ -1879,8 +2039,7 @@ xfs_refcount_recover_extent( INIT_LIST_HEAD(&rr->rr_list); xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec); - if (xfs_refcount_check_irec(to_perag(cur->bc_group), &rr->rr_rrec) != - NULL || + if (xfs_refcount_check_btrec(cur, &rr->rr_rrec) != NULL || XFS_IS_CORRUPT(cur->bc_mp, rr->rr_rrec.rc_domain != XFS_REFC_DOMAIN_COW)) { xfs_btree_mark_sick(cur); @@ -1895,12 +2054,13 @@ xfs_refcount_recover_extent( /* Find and remove leftover CoW reservations. */ int xfs_refcount_recover_cow_leftovers( - struct xfs_mount *mp, - struct xfs_perag *pag) + struct xfs_group *xg) { + struct xfs_mount *mp = xg->xg_mount; + bool isrt = xg->xg_type == XG_TYPE_RTG; struct xfs_trans *tp; struct xfs_btree_cur *cur; - struct xfs_buf *agbp; + struct xfs_buf *agbp = NULL; struct xfs_refcount_recovery *rr, *n; struct list_head debris; union xfs_btree_irec low = { @@ -1913,10 +2073,19 @@ xfs_refcount_recover_cow_leftovers( xfs_fsblock_t fsb; int error; - /* reflink filesystems mustn't have AGs larger than 2^31-1 blocks */ + /* reflink filesystems must not have groups larger than 2^31-1 blocks */ + BUILD_BUG_ON(XFS_MAX_RGBLOCKS >= XFS_REFC_COWFLAG); BUILD_BUG_ON(XFS_MAX_CRC_AG_BLOCKS >= XFS_REFC_COWFLAG); - if (mp->m_sb.sb_agblocks > XFS_MAX_CRC_AG_BLOCKS) - return -EOPNOTSUPP; + + if (isrt) { + if (!xfs_has_rtgroups(mp)) + return 0; + if (xfs_group_max_blocks(xg) >= XFS_MAX_RGBLOCKS) + return -EOPNOTSUPP; + } else { + if (xfs_group_max_blocks(xg) > XFS_MAX_CRC_AG_BLOCKS) + return -EOPNOTSUPP; + } INIT_LIST_HEAD(&debris); @@ -1934,16 +2103,24 @@ xfs_refcount_recover_cow_leftovers( if (error) return error; - error = xfs_alloc_read_agf(pag, tp, 0, &agbp); - if (error) - goto out_trans; - cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag); + if (isrt) { + xfs_rtgroup_lock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT); + cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(xg)); + } else { + error = xfs_alloc_read_agf(to_perag(xg), tp, 0, &agbp); + if (error) + goto out_trans; + cur = xfs_refcountbt_init_cursor(mp, tp, agbp, to_perag(xg)); + } /* Find all the leftover CoW staging extents. */ error = xfs_btree_query_range(cur, &low, &high, xfs_refcount_recover_extent, &debris); xfs_btree_del_cursor(cur, error); - xfs_trans_brelse(tp, agbp); + if (agbp) + xfs_trans_brelse(tp, agbp); + else + xfs_rtgroup_unlock(to_rtg(xg), XFS_RTGLOCK_REFCOUNT); xfs_trans_cancel(tp); if (error) goto out_free; @@ -1956,14 +2133,15 @@ xfs_refcount_recover_cow_leftovers( goto out_free; /* Free the orphan record */ - fsb = xfs_agbno_to_fsb(pag, rr->rr_rrec.rc_startblock); - xfs_refcount_free_cow_extent(tp, fsb, + fsb = xfs_gbno_to_fsb(xg, rr->rr_rrec.rc_startblock); + xfs_refcount_free_cow_extent(tp, isrt, fsb, rr->rr_rrec.rc_blockcount); /* Free the block. */ error = xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL, - XFS_AG_RESV_NONE, 0); + XFS_AG_RESV_NONE, + isrt ? 
XFS_FREE_EXTENT_REALTIME : 0); if (error) goto out_trans; @@ -2028,7 +2206,7 @@ xfs_refcount_query_range_helper( xfs_failaddr_t fa; xfs_refcount_btrec_to_irec(rec, &irec); - fa = xfs_refcount_check_irec(to_perag(cur->bc_group), &irec); + fa = xfs_refcount_check_btrec(cur, &irec); if (fa) return xfs_refcount_complain_bad_rec(cur, fa, &irec); diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h index 62d78afcf1f3..f2e299a716a4 100644 --- a/fs/xfs/libxfs/xfs_refcount.h +++ b/fs/xfs/libxfs/xfs_refcount.h @@ -12,6 +12,7 @@ struct xfs_perag; struct xfs_btree_cur; struct xfs_bmbt_irec; struct xfs_refcount_irec; +struct xfs_rtgroup; extern int xfs_refcount_lookup_le(struct xfs_btree_cur *cur, enum xfs_refc_domain domain, xfs_agblock_t bno, int *stat); @@ -60,6 +61,7 @@ struct xfs_refcount_intent { enum xfs_refcount_intent_type ri_type; xfs_extlen_t ri_blockcount; xfs_fsblock_t ri_startblock; + bool ri_realtime; }; /* Check that the refcount is appropriate for the record domain. */ @@ -74,24 +76,25 @@ xfs_refcount_check_domain( return true; } -void xfs_refcount_increase_extent(struct xfs_trans *tp, +void xfs_refcount_increase_extent(struct xfs_trans *tp, bool isrt, struct xfs_bmbt_irec *irec); -void xfs_refcount_decrease_extent(struct xfs_trans *tp, +void xfs_refcount_decrease_extent(struct xfs_trans *tp, bool isrt, struct xfs_bmbt_irec *irec); -extern int xfs_refcount_finish_one(struct xfs_trans *tp, +int xfs_refcount_finish_one(struct xfs_trans *tp, + struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur); +int xfs_rtrefcount_finish_one(struct xfs_trans *tp, struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur); extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur, xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno, xfs_extlen_t *flen, bool find_end_of_shared); -void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb, - xfs_extlen_t len); -void xfs_refcount_free_cow_extent(struct xfs_trans *tp, xfs_fsblock_t fsb, - xfs_extlen_t len); -extern int xfs_refcount_recover_cow_leftovers(struct xfs_mount *mp, - struct xfs_perag *pag); +void xfs_refcount_alloc_cow_extent(struct xfs_trans *tp, bool isrt, + xfs_fsblock_t fsb, xfs_extlen_t len); +void xfs_refcount_free_cow_extent(struct xfs_trans *tp, bool isrt, + xfs_fsblock_t fsb, xfs_extlen_t len); +int xfs_refcount_recover_cow_leftovers(struct xfs_group *xg); /* * While we're adjusting the refcounts records of an extent, we have @@ -120,6 +123,8 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec, struct xfs_refcount_irec *irec); xfs_failaddr_t xfs_refcount_check_irec(struct xfs_perag *pag, const struct xfs_refcount_irec *irec); +xfs_failaddr_t xfs_rtrefcount_check_irec(struct xfs_rtgroup *rtg, + const struct xfs_refcount_irec *irec); extern int xfs_refcount_insert(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, int *stat); diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c index d0df68dc3131..3cdf50563fec 100644 --- a/fs/xfs/libxfs/xfs_rmap.c +++ b/fs/xfs/libxfs/xfs_rmap.c @@ -25,6 +25,8 @@ #include "xfs_ag.h" #include "xfs_health.h" #include "xfs_rmap_item.h" +#include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" struct kmem_cache *xfs_rmap_intent_cache; @@ -264,11 +266,77 @@ xfs_rmap_check_irec( return NULL; } +static xfs_failaddr_t +xfs_rtrmap_check_meta_irec( + struct xfs_rtgroup *rtg, + const struct xfs_rmap_irec *irec) +{ + struct xfs_mount *mp = rtg_mount(rtg); + + if (irec->rm_offset != 0) + return __this_address; + if 
(irec->rm_flags & XFS_RMAP_UNWRITTEN) + return __this_address; + + switch (irec->rm_owner) { + case XFS_RMAP_OWN_FS: + if (irec->rm_startblock != 0) + return __this_address; + if (irec->rm_blockcount != mp->m_sb.sb_rextsize) + return __this_address; + return NULL; + case XFS_RMAP_OWN_COW: + if (!xfs_has_rtreflink(mp)) + return __this_address; + if (!xfs_verify_rgbext(rtg, irec->rm_startblock, + irec->rm_blockcount)) + return __this_address; + return NULL; + default: + return __this_address; + } + + return NULL; +} + +static xfs_failaddr_t +xfs_rtrmap_check_inode_irec( + struct xfs_rtgroup *rtg, + const struct xfs_rmap_irec *irec) +{ + struct xfs_mount *mp = rtg_mount(rtg); + + if (!xfs_verify_ino(mp, irec->rm_owner)) + return __this_address; + if (!xfs_verify_rgbext(rtg, irec->rm_startblock, irec->rm_blockcount)) + return __this_address; + if (!xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount)) + return __this_address; + return NULL; +} + +xfs_failaddr_t +xfs_rtrmap_check_irec( + struct xfs_rtgroup *rtg, + const struct xfs_rmap_irec *irec) +{ + if (irec->rm_blockcount == 0) + return __this_address; + if (irec->rm_flags & (XFS_RMAP_BMBT_BLOCK | XFS_RMAP_ATTR_FORK)) + return __this_address; + if (XFS_RMAP_NON_INODE_OWNER(irec->rm_owner)) + return xfs_rtrmap_check_meta_irec(rtg, irec); + return xfs_rtrmap_check_inode_irec(rtg, irec); +} + static inline xfs_failaddr_t xfs_rmap_check_btrec( struct xfs_btree_cur *cur, const struct xfs_rmap_irec *irec) { + if (xfs_btree_is_rtrmap(cur->bc_ops) || + xfs_btree_is_mem_rtrmap(cur->bc_ops)) + return xfs_rtrmap_check_irec(to_rtg(cur->bc_group), irec); return xfs_rmap_check_irec(to_perag(cur->bc_group), irec); } @@ -283,6 +351,10 @@ xfs_rmap_complain_bad_rec( if (xfs_btree_is_mem_rmap(cur->bc_ops)) xfs_warn(mp, "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa); + else if (xfs_btree_is_rtrmap(cur->bc_ops)) + xfs_warn(mp, + "RT Reverse Mapping BTree record corruption in rtgroup %u detected at %pS!", + cur->bc_group->xg_gno, fa); else xfs_warn(mp, "Reverse Mapping BTree record corruption in AG %d detected at %pS!", @@ -525,7 +597,7 @@ xfs_rmap_free_check_owner( struct xfs_btree_cur *cur, uint64_t ltoff, struct xfs_rmap_irec *rec, - xfs_filblks_t len, + xfs_extlen_t len, uint64_t owner, uint64_t offset, unsigned int flags) @@ -2556,6 +2628,47 @@ __xfs_rmap_finish_intent( } } +static int +xfs_rmap_finish_init_cursor( + struct xfs_trans *tp, + struct xfs_rmap_intent *ri, + struct xfs_btree_cur **pcur) +{ + struct xfs_perag *pag = to_perag(ri->ri_group); + struct xfs_buf *agbp = NULL; + int error; + + /* + * Refresh the freelist before we start changing the rmapbt, because a + * shape change could cause us to allocate blocks. + */ + error = xfs_free_extent_fix_freelist(tp, pag, &agbp); + if (error) { + xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); + return error; + } + if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) { + xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); + return -EFSCORRUPTED; + } + *pcur = xfs_rmapbt_init_cursor(tp->t_mountp, tp, agbp, pag); + return 0; +} + +static int +xfs_rtrmap_finish_init_cursor( + struct xfs_trans *tp, + struct xfs_rmap_intent *ri, + struct xfs_btree_cur **pcur) +{ + struct xfs_rtgroup *rtg = to_rtg(ri->ri_group); + + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); + xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP); + *pcur = xfs_rtrmapbt_init_cursor(tp, rtg); + return 0; +} + /* * Process one of the deferred rmap operations. We pass back the * btree cursor to maintain our lock on the rmapbt between calls. 
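 * (Illustrative note, not part of the patch: the hunk below moves
 * cursor construction out of this function, so its body reduces to
 * roughly
 *	if (ri->ri_group->xg_type == XG_TYPE_RTG)
 *		error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur);
 *	else
 *		error = xfs_rmap_finish_init_cursor(tp, ri, pcur);
 * and the AGFL fixup is retained only in the per-AG helper.)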
@@ -2571,8 +2684,6 @@ xfs_rmap_finish_one( { struct xfs_owner_info oinfo; struct xfs_mount *mp = tp->t_mountp; - struct xfs_btree_cur *rcur = *pcur; - struct xfs_buf *agbp = NULL; xfs_agblock_t bno; bool unwritten; int error = 0; @@ -2586,38 +2697,26 @@ xfs_rmap_finish_one( * If we haven't gotten a cursor or the cursor AG doesn't match * the startblock, get one now. */ - if (rcur != NULL && rcur->bc_group != ri->ri_group) { - xfs_btree_del_cursor(rcur, 0); - rcur = NULL; + if (*pcur != NULL && (*pcur)->bc_group != ri->ri_group) { + xfs_btree_del_cursor(*pcur, 0); *pcur = NULL; } - if (rcur == NULL) { - struct xfs_perag *pag = to_perag(ri->ri_group); - - /* - * Refresh the freelist before we start changing the - * rmapbt, because a shape change could cause us to - * allocate blocks. - */ - error = xfs_free_extent_fix_freelist(tp, pag, &agbp); - if (error) { - xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); + if (*pcur == NULL) { + if (ri->ri_group->xg_type == XG_TYPE_RTG) + error = xfs_rtrmap_finish_init_cursor(tp, ri, pcur); + else + error = xfs_rmap_finish_init_cursor(tp, ri, pcur); + if (error) return error; - } - if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) { - xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL); - return -EFSCORRUPTED; - } - - *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag); } xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork, ri->ri_bmap.br_startoff); unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN; - bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock); - error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno, + bno = xfs_fsb_to_gbno(mp, ri->ri_bmap.br_startblock, + ri->ri_group->xg_type); + error = __xfs_rmap_finish_intent(*pcur, ri->ri_type, bno, ri->ri_bmap.br_blockcount, &oinfo, unwritten); if (error) return error; @@ -2647,6 +2746,7 @@ __xfs_rmap_add( struct xfs_trans *tp, enum xfs_rmap_intent_type type, uint64_t owner, + bool isrt, int whichfork, struct xfs_bmbt_irec *bmap) { @@ -2658,6 +2758,7 @@ __xfs_rmap_add( ri->ri_owner = owner; ri->ri_whichfork = whichfork; ri->ri_bmap = *bmap; + ri->ri_realtime = isrt; xfs_rmap_defer_add(tp, ri); } @@ -2671,6 +2772,7 @@ xfs_rmap_map_extent( struct xfs_bmbt_irec *PREV) { enum xfs_rmap_intent_type type = XFS_RMAP_MAP; + bool isrt = xfs_ifork_is_realtime(ip, whichfork); if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork)) return; @@ -2678,7 +2780,7 @@ xfs_rmap_map_extent( if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip)) type = XFS_RMAP_MAP_SHARED; - __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV); + __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV); } /* Unmap an extent out of a file. 
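 * (Illustrative note, not part of the patch: as in the map case above,
 * the unmap and convert hooks below derive isrt from
 * xfs_ifork_is_realtime(ip, whichfork) and thread it through
 * __xfs_rmap_add() so the deferred intent is later replayed against the
 * correct rmap btree.)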
*/ @@ -2690,6 +2792,7 @@ xfs_rmap_unmap_extent( struct xfs_bmbt_irec *PREV) { enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP; + bool isrt = xfs_ifork_is_realtime(ip, whichfork); if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork)) return; @@ -2697,7 +2800,7 @@ xfs_rmap_unmap_extent( if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip)) type = XFS_RMAP_UNMAP_SHARED; - __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV); + __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV); } /* @@ -2715,6 +2818,7 @@ xfs_rmap_convert_extent( struct xfs_bmbt_irec *PREV) { enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT; + bool isrt = xfs_ifork_is_realtime(ip, whichfork); if (!xfs_rmap_update_is_needed(mp, whichfork)) return; @@ -2722,15 +2826,15 @@ xfs_rmap_convert_extent( if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip)) type = XFS_RMAP_CONVERT_SHARED; - __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV); + __xfs_rmap_add(tp, type, ip->i_ino, isrt, whichfork, PREV); } /* Schedule the creation of an rmap for non-file data. */ void xfs_rmap_alloc_extent( struct xfs_trans *tp, - xfs_agnumber_t agno, - xfs_agblock_t bno, + bool isrt, + xfs_fsblock_t fsbno, xfs_extlen_t len, uint64_t owner) { @@ -2739,20 +2843,20 @@ xfs_rmap_alloc_extent( if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK)) return; - bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno); + bmap.br_startblock = fsbno; bmap.br_blockcount = len; bmap.br_startoff = 0; bmap.br_state = XFS_EXT_NORM; - __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap); + __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, isrt, XFS_DATA_FORK, &bmap); } /* Schedule the deletion of an rmap for non-file data. */ void xfs_rmap_free_extent( struct xfs_trans *tp, - xfs_agnumber_t agno, - xfs_agblock_t bno, + bool isrt, + xfs_fsblock_t fsbno, xfs_extlen_t len, uint64_t owner) { @@ -2761,12 +2865,12 @@ xfs_rmap_free_extent( if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK)) return; - bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno); + bmap.br_startblock = fsbno; bmap.br_blockcount = len; bmap.br_startoff = 0; bmap.br_state = XFS_EXT_NORM; - __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap); + __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, isrt, XFS_DATA_FORK, &bmap); } /* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. 
*/ diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h index 96b4321d8310..5f39f6e53cd1 100644 --- a/fs/xfs/libxfs/xfs_rmap.h +++ b/fs/xfs/libxfs/xfs_rmap.h @@ -7,6 +7,7 @@ #define __XFS_RMAP_H__ struct xfs_perag; +struct xfs_rtgroup; static inline void xfs_rmap_ino_bmbt_owner( @@ -174,6 +175,7 @@ struct xfs_rmap_intent { uint64_t ri_owner; struct xfs_bmbt_irec ri_bmap; struct xfs_group *ri_group; + bool ri_realtime; }; /* functions for updating the rmapbt based on bmbt map/unmap operations */ @@ -184,10 +186,10 @@ void xfs_rmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip, void xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_trans *tp, struct xfs_inode *ip, int whichfork, struct xfs_bmbt_irec *imap); -void xfs_rmap_alloc_extent(struct xfs_trans *tp, xfs_agnumber_t agno, - xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner); -void xfs_rmap_free_extent(struct xfs_trans *tp, xfs_agnumber_t agno, - xfs_agblock_t bno, xfs_extlen_t len, uint64_t owner); +void xfs_rmap_alloc_extent(struct xfs_trans *tp, bool isrt, xfs_fsblock_t fsbno, + xfs_extlen_t len, uint64_t owner); +void xfs_rmap_free_extent(struct xfs_trans *tp, bool isrt, xfs_fsblock_t fsbno, + xfs_extlen_t len, uint64_t owner); int xfs_rmap_finish_one(struct xfs_trans *tp, struct xfs_rmap_intent *ri, struct xfs_btree_cur **pcur); @@ -206,6 +208,8 @@ xfs_failaddr_t xfs_rmap_btrec_to_irec(const union xfs_btree_rec *rec, struct xfs_rmap_irec *irec); xfs_failaddr_t xfs_rmap_check_irec(struct xfs_perag *pag, const struct xfs_rmap_irec *irec); +xfs_failaddr_t xfs_rtrmap_check_irec(struct xfs_rtgroup *rtg, + const struct xfs_rmap_irec *irec); int xfs_rmap_has_records(struct xfs_btree_cur *cur, xfs_agblock_t bno, xfs_extlen_t len, enum xbtree_recpacking *outcome); diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index 4ddfb7e395b3..770adf60dd73 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -1055,7 +1055,7 @@ xfs_rtfree_extent( xfs_rtxlen_t len) /* length of extent freed */ { struct xfs_mount *mp = tp->t_mountp; - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); struct xfs_rtalloc_args args = { .mp = mp, .tp = tp, diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h index 16563a44bd13..22e5d9cd95f4 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.h +++ b/fs/xfs/libxfs/xfs_rtbitmap.h @@ -135,6 +135,15 @@ xfs_rtb_to_rtx( return div_u64(rtbno, mp->m_sb.sb_rextsize); } +/* Return the offset of a rtgroup block number within an rt extent. */ +static inline xfs_extlen_t +xfs_rgbno_to_rtxoff( + struct xfs_mount *mp, + xfs_rgblock_t rgbno) +{ + return rgbno % mp->m_sb.sb_rextsize; +} + /* Return the offset of an rt block number within an rt extent. */ static inline xfs_extlen_t xfs_rtb_to_rtxoff( diff --git a/fs/xfs/libxfs/xfs_rtgroup.c b/fs/xfs/libxfs/xfs_rtgroup.c index 4f3bfc884aff..d84d32f1b48f 100644 --- a/fs/xfs/libxfs/xfs_rtgroup.c +++ b/fs/xfs/libxfs/xfs_rtgroup.c @@ -33,6 +33,8 @@ #include "xfs_rtbitmap.h" #include "xfs_metafile.h" #include "xfs_metadir.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" /* Find the first usable fsblock in this rtgroup. */ static inline uint32_t @@ -197,11 +199,17 @@ xfs_rtgroup_lock( * Lock both realtime free space metadata inodes for a freespace * update. 
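	 * (Illustrative note, not part of the patch: the new XFS_RTGLOCK_RMAP
	 * and XFS_RTGLOCK_REFCOUNT flags below take the rmap and refcount
	 * metadata inodes ILOCK_EXCL after the bitmap/summary pair, and
	 * xfs_rtgroup_unlock() drops them again in the reverse order.)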
*/ - xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL); - xfs_ilock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL); + xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_EXCL); + xfs_ilock(rtg_summary(rtg), XFS_ILOCK_EXCL); } else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) { - xfs_ilock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED); + xfs_ilock(rtg_bitmap(rtg), XFS_ILOCK_SHARED); } + + if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg)) + xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_EXCL); + + if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg)) + xfs_ilock(rtg_refcount(rtg), XFS_ILOCK_EXCL); } /* Unlock metadata inodes associated with this rt group. */ @@ -214,11 +222,17 @@ xfs_rtgroup_unlock( ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) || !(rtglock_flags & XFS_RTGLOCK_BITMAP)); + if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg)) + xfs_iunlock(rtg_refcount(rtg), XFS_ILOCK_EXCL); + + if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg)) + xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_EXCL); + if (rtglock_flags & XFS_RTGLOCK_BITMAP) { - xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_SUMMARY], XFS_ILOCK_EXCL); - xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_EXCL); + xfs_iunlock(rtg_summary(rtg), XFS_ILOCK_EXCL); + xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_EXCL); } else if (rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED) { - xfs_iunlock(rtg->rtg_inodes[XFS_RTGI_BITMAP], XFS_ILOCK_SHARED); + xfs_iunlock(rtg_bitmap(rtg), XFS_ILOCK_SHARED); } } @@ -236,11 +250,15 @@ xfs_rtgroup_trans_join( ASSERT(!(rtglock_flags & XFS_RTGLOCK_BITMAP_SHARED)); if (rtglock_flags & XFS_RTGLOCK_BITMAP) { - xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_BITMAP], - XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, rtg->rtg_inodes[XFS_RTGI_SUMMARY], - XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, rtg_bitmap(rtg), XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, rtg_summary(rtg), XFS_ILOCK_EXCL); } + + if ((rtglock_flags & XFS_RTGLOCK_RMAP) && rtg_rmap(rtg)) + xfs_trans_ijoin(tp, rtg_rmap(rtg), XFS_ILOCK_EXCL); + + if ((rtglock_flags & XFS_RTGLOCK_REFCOUNT) && rtg_refcount(rtg)) + xfs_trans_ijoin(tp, rtg_refcount(rtg), XFS_ILOCK_EXCL); } /* Retrieve rt group geometry. */ @@ -284,7 +302,8 @@ xfs_rtginode_ilock_print_fn( const struct xfs_inode *ip = container_of(m, struct xfs_inode, i_lock.dep_map); - printk(KERN_CONT " rgno=%u", ip->i_projid); + printk(KERN_CONT " rgno=%u metatype=%s", ip->i_projid, + xfs_metafile_type_str(ip->i_metatype)); } /* @@ -316,8 +335,10 @@ struct xfs_rtginode_ops { unsigned int sick; /* rtgroup sickness flag */ + unsigned int fmt_mask; /* all valid data fork formats */ + /* Does the fs have this feature? */ - bool (*enabled)(struct xfs_mount *mp); + bool (*enabled)(const struct xfs_mount *mp); /* Create this rtgroup metadata inode and initialize it. 
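	 * (Illustrative note, not part of the patch: fmt_mask above is a
	 * bitmask of 1U << XFS_DINODE_FMT_* values; xfs_rtginode_load() rejects
	 * an inode whose data fork format is not set in the mask, so the new
	 * rmap and refcount inodes must use XFS_DINODE_FMT_META_BTREE while
	 * bitmap/summary may be extents or btree format.)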
*/ int (*create)(struct xfs_rtgroup *rtg, @@ -331,14 +352,40 @@ static const struct xfs_rtginode_ops xfs_rtginode_ops[XFS_RTGI_MAX] = { .name = "bitmap", .metafile_type = XFS_METAFILE_RTBITMAP, .sick = XFS_SICK_RG_BITMAP, + .fmt_mask = (1U << XFS_DINODE_FMT_EXTENTS) | + (1U << XFS_DINODE_FMT_BTREE), .create = xfs_rtbitmap_create, }, [XFS_RTGI_SUMMARY] = { .name = "summary", .metafile_type = XFS_METAFILE_RTSUMMARY, .sick = XFS_SICK_RG_SUMMARY, + .fmt_mask = (1U << XFS_DINODE_FMT_EXTENTS) | + (1U << XFS_DINODE_FMT_BTREE), .create = xfs_rtsummary_create, }, + [XFS_RTGI_RMAP] = { + .name = "rmap", + .metafile_type = XFS_METAFILE_RTRMAP, + .sick = XFS_SICK_RG_RMAPBT, + .fmt_mask = 1U << XFS_DINODE_FMT_META_BTREE, + /* + * growfs must create the rtrmap inodes before adding a + * realtime volume to the filesystem, so we cannot use the + * rtrmapbt predicate here. + */ + .enabled = xfs_has_rmapbt, + .create = xfs_rtrmapbt_create, + }, + [XFS_RTGI_REFCOUNT] = { + .name = "refcount", + .metafile_type = XFS_METAFILE_RTREFCOUNT, + .sick = XFS_SICK_RG_REFCNTBT, + .fmt_mask = 1U << XFS_DINODE_FMT_META_BTREE, + /* same comment about growfs and rmap inodes applies here */ + .enabled = xfs_has_reflink, + .create = xfs_rtrefcountbt_create, + }, }; /* Return the shortname of this rtgroup inode. */ @@ -435,8 +482,7 @@ xfs_rtginode_load( return error; } - if (XFS_IS_CORRUPT(mp, ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && - ip->i_df.if_format != XFS_DINODE_FMT_BTREE)) { + if (XFS_IS_CORRUPT(mp, !((1U << ip->i_df.if_format) & ops->fmt_mask))) { xfs_irele(ip); xfs_rtginode_mark_sick(rtg, type); return -EFSCORRUPTED; diff --git a/fs/xfs/libxfs/xfs_rtgroup.h b/fs/xfs/libxfs/xfs_rtgroup.h index 2d7822644eff..03f39d4e43fc 100644 --- a/fs/xfs/libxfs/xfs_rtgroup.h +++ b/fs/xfs/libxfs/xfs_rtgroup.h @@ -14,6 +14,8 @@ struct xfs_trans; enum xfs_rtg_inodes { XFS_RTGI_BITMAP, /* allocation bitmap */ XFS_RTGI_SUMMARY, /* allocation summary */ + XFS_RTGI_RMAP, /* rmap btree inode */ + XFS_RTGI_REFCOUNT, /* refcount btree inode */ XFS_RTGI_MAX, }; @@ -64,6 +66,26 @@ static inline xfs_rgnumber_t rtg_rgno(const struct xfs_rtgroup *rtg) return rtg->rtg_group.xg_gno; } +static inline struct xfs_inode *rtg_bitmap(const struct xfs_rtgroup *rtg) +{ + return rtg->rtg_inodes[XFS_RTGI_BITMAP]; +} + +static inline struct xfs_inode *rtg_summary(const struct xfs_rtgroup *rtg) +{ + return rtg->rtg_inodes[XFS_RTGI_SUMMARY]; +} + +static inline struct xfs_inode *rtg_rmap(const struct xfs_rtgroup *rtg) +{ + return rtg->rtg_inodes[XFS_RTGI_RMAP]; +} + +static inline struct xfs_inode *rtg_refcount(const struct xfs_rtgroup *rtg) +{ + return rtg->rtg_inodes[XFS_RTGI_REFCOUNT]; +} + /* Passive rtgroup references */ static inline struct xfs_rtgroup * xfs_rtgroup_get( @@ -122,6 +144,32 @@ xfs_rtgroup_next( return xfs_rtgroup_next_range(mp, rtg, 0, mp->m_sb.sb_rgcount - 1); } +static inline bool +xfs_verify_rgbno( + struct xfs_rtgroup *rtg, + xfs_rgblock_t rgbno) +{ + ASSERT(xfs_has_rtgroups(rtg_mount(rtg))); + + return xfs_verify_gbno(rtg_group(rtg), rgbno); +} + +/* + * Check that [@rgbno,@len] is a valid extent range in @rtg. + * + * Must only be used for RTG-enabled file systems. 
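+ * (Illustrative note, not part of the patch: this wraps
+ * xfs_verify_gbext() on the rtgroup's generic group and is the bounds
+ * check used by the new xfs_rtrmap_check_irec() and
+ * xfs_rtrefcount_check_irec() record verifiers.)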
+ */ +static inline bool +xfs_verify_rgbext( + struct xfs_rtgroup *rtg, + xfs_rgblock_t rgbno, + xfs_extlen_t len) +{ + ASSERT(xfs_has_rtgroups(rtg_mount(rtg))); + + return xfs_verify_gbext(rtg_group(rtg), rgbno, len); +} + static inline xfs_rtblock_t xfs_rgbno_to_rtb( struct xfs_rtgroup *rtg, @@ -223,9 +271,15 @@ int xfs_update_last_rtgroup_size(struct xfs_mount *mp, #define XFS_RTGLOCK_BITMAP (1U << 0) /* Lock the rt bitmap inode in shared mode */ #define XFS_RTGLOCK_BITMAP_SHARED (1U << 1) +/* Lock the rt rmap inode in exclusive mode */ +#define XFS_RTGLOCK_RMAP (1U << 2) +/* Lock the rt refcount inode in exclusive mode */ +#define XFS_RTGLOCK_REFCOUNT (1U << 3) #define XFS_RTGLOCK_ALL_FLAGS (XFS_RTGLOCK_BITMAP | \ - XFS_RTGLOCK_BITMAP_SHARED) + XFS_RTGLOCK_BITMAP_SHARED | \ + XFS_RTGLOCK_RMAP | \ + XFS_RTGLOCK_REFCOUNT) void xfs_rtgroup_lock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags); void xfs_rtgroup_unlock(struct xfs_rtgroup *rtg, unsigned int rtglock_flags); @@ -248,6 +302,8 @@ int xfs_rtginode_create(struct xfs_rtgroup *rtg, enum xfs_rtg_inodes type, bool init); void xfs_rtginode_irele(struct xfs_inode **ipp); +void xfs_rtginode_irele(struct xfs_inode **ipp); + static inline const char *xfs_rtginode_path(xfs_rgnumber_t rgno, enum xfs_rtg_inodes type) { diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.c b/fs/xfs/libxfs/xfs_rtrefcount_btree.c new file mode 100644 index 000000000000..3db5e7a4a945 --- /dev/null +++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.c @@ -0,0 +1,757 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_bit.h" +#include "xfs_sb.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_btree_staging.h" +#include "xfs_rtrefcount_btree.h" +#include "xfs_refcount.h" +#include "xfs_trace.h" +#include "xfs_cksum.h" +#include "xfs_error.h" +#include "xfs_extent_busy.h" +#include "xfs_rtgroup.h" +#include "xfs_rtbitmap.h" +#include "xfs_metafile.h" +#include "xfs_health.h" + +static struct kmem_cache *xfs_rtrefcountbt_cur_cache; + +/* + * Realtime Reference Count btree. + * + * This is a btree used to track the owner(s) of a given extent in the realtime + * device. See the comments in xfs_refcount_btree.c for more information. + * + * This tree is basically the same as the regular refcount btree except that + * it's rooted in an inode. 
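+ * (Illustrative note, not part of the patch: the records here are
+ * reference counts of shared realtime extents, mirroring the per-AG
+ * refcount btree; the root lives in the data fork of the rtgroup's
+ * refcount metadata inode, rtg_refcount(rtg), and the tree uses 64-bit
+ * block pointers (XFS_BTREE_LONG_PTR_LEN) even though records are
+ * keyed by 32-bit rtgroup block numbers.)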
+ */ + +static struct xfs_btree_cur * +xfs_rtrefcountbt_dup_cursor( + struct xfs_btree_cur *cur) +{ + return xfs_rtrefcountbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group)); +} + +STATIC int +xfs_rtrefcountbt_get_minrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + + return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, + level == 0) / 2; + } + + return cur->bc_mp->m_rtrefc_mnr[level != 0]; +} + +STATIC int +xfs_rtrefcountbt_get_maxrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + + return xfs_rtrefcountbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, + level == 0); + } + + return cur->bc_mp->m_rtrefc_mxr[level != 0]; +} + +/* + * Calculate number of records in a realtime refcount btree inode root. + */ +unsigned int +xfs_rtrefcountbt_droot_maxrecs( + unsigned int blocklen, + bool leaf) +{ + blocklen -= sizeof(struct xfs_rtrefcount_root); + + if (leaf) + return blocklen / sizeof(struct xfs_refcount_rec); + return blocklen / (2 * sizeof(struct xfs_refcount_key) + + sizeof(xfs_rtrefcount_ptr_t)); +} + +/* + * Get the maximum records we could store in the on-disk format. + * + * For non-root nodes this is equivalent to xfs_rtrefcountbt_get_maxrecs, but + * for the root node this checks the available space in the dinode fork so that + * we can resize the in-memory buffer to match it. After a resize to the + * maximum size this function returns the same value as + * xfs_rtrefcountbt_get_maxrecs for the root node, too. + */ +STATIC int +xfs_rtrefcountbt_get_dmaxrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level != cur->bc_nlevels - 1) + return cur->bc_mp->m_rtrefc_mxr[level != 0]; + return xfs_rtrefcountbt_droot_maxrecs(cur->bc_ino.forksize, level == 0); +} + +STATIC void +xfs_rtrefcountbt_init_key_from_rec( + union xfs_btree_key *key, + const union xfs_btree_rec *rec) +{ + key->refc.rc_startblock = rec->refc.rc_startblock; +} + +STATIC void +xfs_rtrefcountbt_init_high_key_from_rec( + union xfs_btree_key *key, + const union xfs_btree_rec *rec) +{ + __u32 x; + + x = be32_to_cpu(rec->refc.rc_startblock); + x += be32_to_cpu(rec->refc.rc_blockcount) - 1; + key->refc.rc_startblock = cpu_to_be32(x); +} + +STATIC void +xfs_rtrefcountbt_init_rec_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + const struct xfs_refcount_irec *irec = &cur->bc_rec.rc; + uint32_t start; + + start = xfs_refcount_encode_startblock(irec->rc_startblock, + irec->rc_domain); + rec->refc.rc_startblock = cpu_to_be32(start); + rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount); + rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount); +} + +STATIC void +xfs_rtrefcountbt_init_ptr_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + ptr->l = 0; +} + +STATIC int64_t +xfs_rtrefcountbt_key_diff( + struct xfs_btree_cur *cur, + const union xfs_btree_key *key) +{ + const struct xfs_refcount_key *kp = &key->refc; + const struct xfs_refcount_irec *irec = &cur->bc_rec.rc; + uint32_t start; + + start = xfs_refcount_encode_startblock(irec->rc_startblock, + irec->rc_domain); + return (int64_t)be32_to_cpu(kp->rc_startblock) - start; +} + +STATIC int64_t +xfs_rtrefcountbt_diff_two_keys( + struct xfs_btree_cur *cur, + const union xfs_btree_key *k1, + const union xfs_btree_key *k2, + const union xfs_btree_key *mask) +{ + ASSERT(!mask || mask->refc.rc_startblock); + + return 
(int64_t)be32_to_cpu(k1->refc.rc_startblock) - + be32_to_cpu(k2->refc.rc_startblock); +} + +static xfs_failaddr_t +xfs_rtrefcountbt_verify( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_target->bt_mount; + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + xfs_failaddr_t fa; + int level; + + if (!xfs_verify_magic(bp, block->bb_magic)) + return __this_address; + + if (!xfs_has_reflink(mp)) + return __this_address; + fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN); + if (fa) + return fa; + level = be16_to_cpu(block->bb_level); + if (level > mp->m_rtrefc_maxlevels) + return __this_address; + + return xfs_btree_fsblock_verify(bp, mp->m_rtrefc_mxr[level != 0]); +} + +static void +xfs_rtrefcountbt_read_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa; + + if (!xfs_btree_fsblock_verify_crc(bp)) + xfs_verifier_error(bp, -EFSBADCRC, __this_address); + else { + fa = xfs_rtrefcountbt_verify(bp); + if (fa) + xfs_verifier_error(bp, -EFSCORRUPTED, fa); + } + + if (bp->b_error) + trace_xfs_btree_corrupt(bp, _RET_IP_); +} + +static void +xfs_rtrefcountbt_write_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa; + + fa = xfs_rtrefcountbt_verify(bp); + if (fa) { + trace_xfs_btree_corrupt(bp, _RET_IP_); + xfs_verifier_error(bp, -EFSCORRUPTED, fa); + return; + } + xfs_btree_fsblock_calc_crc(bp); + +} + +const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops = { + .name = "xfs_rtrefcountbt", + .magic = { 0, cpu_to_be32(XFS_RTREFC_CRC_MAGIC) }, + .verify_read = xfs_rtrefcountbt_read_verify, + .verify_write = xfs_rtrefcountbt_write_verify, + .verify_struct = xfs_rtrefcountbt_verify, +}; + +STATIC int +xfs_rtrefcountbt_keys_inorder( + struct xfs_btree_cur *cur, + const union xfs_btree_key *k1, + const union xfs_btree_key *k2) +{ + return be32_to_cpu(k1->refc.rc_startblock) < + be32_to_cpu(k2->refc.rc_startblock); +} + +STATIC int +xfs_rtrefcountbt_recs_inorder( + struct xfs_btree_cur *cur, + const union xfs_btree_rec *r1, + const union xfs_btree_rec *r2) +{ + return be32_to_cpu(r1->refc.rc_startblock) + + be32_to_cpu(r1->refc.rc_blockcount) <= + be32_to_cpu(r2->refc.rc_startblock); +} + +STATIC enum xbtree_key_contig +xfs_rtrefcountbt_keys_contiguous( + struct xfs_btree_cur *cur, + const union xfs_btree_key *key1, + const union xfs_btree_key *key2, + const union xfs_btree_key *mask) +{ + ASSERT(!mask || mask->refc.rc_startblock); + + return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock), + be32_to_cpu(key2->refc.rc_startblock)); +} + +static inline void +xfs_rtrefcountbt_move_ptrs( + struct xfs_mount *mp, + struct xfs_btree_block *broot, + short old_size, + size_t new_size, + unsigned int numrecs) +{ + void *dptr; + void *sptr; + + sptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, old_size); + dptr = xfs_rtrefcount_broot_ptr_addr(mp, broot, 1, new_size); + memmove(dptr, sptr, numrecs * sizeof(xfs_rtrefcount_ptr_t)); +} + +static struct xfs_btree_block * +xfs_rtrefcountbt_broot_realloc( + struct xfs_btree_cur *cur, + unsigned int new_numrecs) +{ + struct xfs_mount *mp = cur->bc_mp; + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + struct xfs_btree_block *broot; + unsigned int new_size; + unsigned int old_size = ifp->if_broot_bytes; + const unsigned int level = cur->bc_nlevels - 1; + + new_size = xfs_rtrefcount_broot_space_calc(mp, level, new_numrecs); + + /* Handle the nop case quietly. 
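+	 * (Illustrative note, not part of the patch: when a node-level root
+	 * grows or shrinks below, the child pointers must be memmoved because
+	 * xfs_rtrefcount_broot_ptr_addr() places them after the key area,
+	 * whose size depends on the root block size, so their offset changes
+	 * with every resize.)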
*/ + if (new_size == old_size) + return ifp->if_broot; + + if (new_size > old_size) { + unsigned int old_numrecs; + + /* + * If there wasn't any memory allocated before, just allocate + * it now and get out. + */ + if (old_size == 0) + return xfs_broot_realloc(ifp, new_size); + + /* + * If there is already an existing if_broot, then we need to + * realloc it and possibly move the node block pointers because + * those are not butted up against the btree block header. + */ + old_numrecs = xfs_rtrefcountbt_maxrecs(mp, old_size, level); + broot = xfs_broot_realloc(ifp, new_size); + if (level > 0) + xfs_rtrefcountbt_move_ptrs(mp, broot, old_size, + new_size, old_numrecs); + goto out_broot; + } + + /* + * We're reducing numrecs. If we're going all the way to zero, just + * free the block. + */ + ASSERT(ifp->if_broot != NULL && old_size > 0); + if (new_size == 0) + return xfs_broot_realloc(ifp, 0); + + /* + * Shrink the btree root by possibly moving the rtrmapbt pointers, + * since they are not butted up against the btree block header. Then + * reallocate broot. + */ + if (level > 0) + xfs_rtrefcountbt_move_ptrs(mp, ifp->if_broot, old_size, + new_size, new_numrecs); + broot = xfs_broot_realloc(ifp, new_size); + +out_broot: + ASSERT(xfs_rtrefcount_droot_space(broot) <= + xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork)); + return broot; +} + +const struct xfs_btree_ops xfs_rtrefcountbt_ops = { + .name = "rtrefcount", + .type = XFS_BTREE_TYPE_INODE, + .geom_flags = XFS_BTGEO_IROOT_RECORDS, + + .rec_len = sizeof(struct xfs_refcount_rec), + .key_len = sizeof(struct xfs_refcount_key), + .ptr_len = XFS_BTREE_LONG_PTR_LEN, + + .lru_refs = XFS_REFC_BTREE_REF, + .statoff = XFS_STATS_CALC_INDEX(xs_rtrefcbt_2), + .sick_mask = XFS_SICK_RG_REFCNTBT, + + .dup_cursor = xfs_rtrefcountbt_dup_cursor, + .alloc_block = xfs_btree_alloc_metafile_block, + .free_block = xfs_btree_free_metafile_block, + .get_minrecs = xfs_rtrefcountbt_get_minrecs, + .get_maxrecs = xfs_rtrefcountbt_get_maxrecs, + .get_dmaxrecs = xfs_rtrefcountbt_get_dmaxrecs, + .init_key_from_rec = xfs_rtrefcountbt_init_key_from_rec, + .init_high_key_from_rec = xfs_rtrefcountbt_init_high_key_from_rec, + .init_rec_from_cur = xfs_rtrefcountbt_init_rec_from_cur, + .init_ptr_from_cur = xfs_rtrefcountbt_init_ptr_from_cur, + .key_diff = xfs_rtrefcountbt_key_diff, + .buf_ops = &xfs_rtrefcountbt_buf_ops, + .diff_two_keys = xfs_rtrefcountbt_diff_two_keys, + .keys_inorder = xfs_rtrefcountbt_keys_inorder, + .recs_inorder = xfs_rtrefcountbt_recs_inorder, + .keys_contiguous = xfs_rtrefcountbt_keys_contiguous, + .broot_realloc = xfs_rtrefcountbt_broot_realloc, +}; + +/* Allocate a new rt refcount btree cursor. */ +struct xfs_btree_cur * +xfs_rtrefcountbt_init_cursor( + struct xfs_trans *tp, + struct xfs_rtgroup *rtg) +{ + struct xfs_inode *ip = rtg_refcount(rtg); + struct xfs_mount *mp = rtg_mount(rtg); + struct xfs_btree_cur *cur; + + xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL); + + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrefcountbt_ops, + mp->m_rtrefc_maxlevels, xfs_rtrefcountbt_cur_cache); + + cur->bc_ino.ip = ip; + cur->bc_refc.nr_ops = 0; + cur->bc_refc.shape_changes = 0; + cur->bc_group = xfs_group_hold(rtg_group(rtg)); + cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1; + cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK); + cur->bc_ino.whichfork = XFS_DATA_FORK; + return cur; +} + +/* + * Install a new rt reverse mapping btree root. Caller is responsible for + * invalidating and freeing the old btree blocks. 
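+ * (Illustrative note, not part of the patch: despite the "reverse
+ * mapping" wording above, this commits a staged realtime refcount
+ * btree root into the inode's data fork; the staged fork must be in
+ * XFS_DINODE_FMT_META_BTREE format, as asserted below.)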
+ */ +void +xfs_rtrefcountbt_commit_staged_btree( + struct xfs_btree_cur *cur, + struct xfs_trans *tp) +{ + struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake; + struct xfs_ifork *ifp; + int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT; + + ASSERT(cur->bc_flags & XFS_BTREE_STAGING); + ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE); + + /* + * Free any resources hanging off the real fork, then shallow-copy the + * staging fork's contents into the real fork to transfer everything + * we just built. + */ + ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK); + xfs_idestroy_fork(ifp); + memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork)); + + cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno; + xfs_trans_log_inode(tp, cur->bc_ino.ip, flags); + xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK); +} + +/* Calculate number of records in a realtime refcount btree block. */ +static inline unsigned int +xfs_rtrefcountbt_block_maxrecs( + unsigned int blocklen, + bool leaf) +{ + + if (leaf) + return blocklen / sizeof(struct xfs_refcount_rec); + return blocklen / (sizeof(struct xfs_refcount_key) + + sizeof(xfs_rtrefcount_ptr_t)); +} + +/* + * Calculate number of records in an refcount btree block. + */ +unsigned int +xfs_rtrefcountbt_maxrecs( + struct xfs_mount *mp, + unsigned int blocklen, + bool leaf) +{ + blocklen -= XFS_RTREFCOUNT_BLOCK_LEN; + return xfs_rtrefcountbt_block_maxrecs(blocklen, leaf); +} + +/* Compute the max possible height for realtime refcount btrees. */ +unsigned int +xfs_rtrefcountbt_maxlevels_ondisk(void) +{ + unsigned int minrecs[2]; + unsigned int blocklen; + + blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN; + + minrecs[0] = xfs_rtrefcountbt_block_maxrecs(blocklen, true) / 2; + minrecs[1] = xfs_rtrefcountbt_block_maxrecs(blocklen, false) / 2; + + /* We need at most one record for every block in an rt group. */ + return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_RGBLOCKS); +} + +int __init +xfs_rtrefcountbt_init_cur_cache(void) +{ + xfs_rtrefcountbt_cur_cache = kmem_cache_create("xfs_rtrefcountbt_cur", + xfs_btree_cur_sizeof( + xfs_rtrefcountbt_maxlevels_ondisk()), + 0, 0, NULL); + + if (!xfs_rtrefcountbt_cur_cache) + return -ENOMEM; + return 0; +} + +void +xfs_rtrefcountbt_destroy_cur_cache(void) +{ + kmem_cache_destroy(xfs_rtrefcountbt_cur_cache); + xfs_rtrefcountbt_cur_cache = NULL; +} + +/* Compute the maximum height of a realtime refcount btree. */ +void +xfs_rtrefcountbt_compute_maxlevels( + struct xfs_mount *mp) +{ + unsigned int d_maxlevels, r_maxlevels; + + if (!xfs_has_rtreflink(mp)) { + mp->m_rtrefc_maxlevels = 0; + return; + } + + /* + * The realtime refcountbt lives on the data device, which means that + * its maximum height is constrained by the size of the data device and + * the height required to store one refcount record for each rtextent + * in an rt group. + */ + d_maxlevels = xfs_btree_space_to_height(mp->m_rtrefc_mnr, + mp->m_sb.sb_dblocks); + r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrefc_mnr, + mp->m_sb.sb_rgextents); + + /* Add one level to handle the inode root level. */ + mp->m_rtrefc_maxlevels = min(d_maxlevels, r_maxlevels) + 1; +} + +/* Calculate the rtrefcount btree size for some records. */ +unsigned long long +xfs_rtrefcountbt_calc_size( + struct xfs_mount *mp, + unsigned long long len) +{ + return xfs_btree_calc_size(mp->m_rtrefc_mnr, len); +} + +/* + * Calculate the maximum refcount btree size. 
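+ * (Illustrative note, not part of the patch: m_rtrefc_mxr[0] can still
+ * be zero while mkfs is probing geometry, hence the early bail-out in
+ * the helper below; xfs_rtrefcountbt_calc_reserves() then sizes the
+ * worst case as one record per rt extent, i.e. sb_rgextents records.)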
+ */ +static unsigned long long +xfs_rtrefcountbt_max_size( + struct xfs_mount *mp, + xfs_rtblock_t rtblocks) +{ + /* Bail out if we're uninitialized, which can happen in mkfs. */ + if (mp->m_rtrefc_mxr[0] == 0) + return 0; + + return xfs_rtrefcountbt_calc_size(mp, rtblocks); +} + +/* + * Figure out how many blocks to reserve and how many are used by this btree. + * We need enough space to hold one record for every rt extent in the rtgroup. + */ +xfs_filblks_t +xfs_rtrefcountbt_calc_reserves( + struct xfs_mount *mp) +{ + if (!xfs_has_rtreflink(mp)) + return 0; + + return xfs_rtrefcountbt_max_size(mp, mp->m_sb.sb_rgextents); +} + +/* + * Convert on-disk form of btree root to in-memory form. + */ +STATIC void +xfs_rtrefcountbt_from_disk( + struct xfs_inode *ip, + struct xfs_rtrefcount_root *dblock, + int dblocklen, + struct xfs_btree_block *rblock) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_refcount_key *fkp; + __be64 *fpp; + struct xfs_refcount_key *tkp; + __be64 *tpp; + struct xfs_refcount_rec *frp; + struct xfs_refcount_rec *trp; + unsigned int numrecs; + unsigned int maxrecs; + unsigned int rblocklen; + + rblocklen = xfs_rtrefcount_broot_space(mp, dblock); + + xfs_btree_init_block(mp, rblock, &xfs_rtrefcountbt_ops, 0, 0, + ip->i_ino); + + rblock->bb_level = dblock->bb_level; + rblock->bb_numrecs = dblock->bb_numrecs; + + if (be16_to_cpu(rblock->bb_level) > 0) { + maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false); + fkp = xfs_rtrefcount_droot_key_addr(dblock, 1); + tkp = xfs_rtrefcount_key_addr(rblock, 1); + fpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs); + tpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen); + numrecs = be16_to_cpu(dblock->bb_numrecs); + memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs); + memcpy(tpp, fpp, sizeof(*fpp) * numrecs); + } else { + frp = xfs_rtrefcount_droot_rec_addr(dblock, 1); + trp = xfs_rtrefcount_rec_addr(rblock, 1); + numrecs = be16_to_cpu(dblock->bb_numrecs); + memcpy(trp, frp, sizeof(*frp) * numrecs); + } +} + +/* Load a realtime reference count btree root in from disk. */ +int +xfs_iformat_rtrefcount( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + struct xfs_btree_block *broot; + unsigned int numrecs; + unsigned int level; + int dsize; + + /* + * growfs must create the rtrefcount inodes before adding a realtime + * volume to the filesystem, so we cannot use the rtrefcount predicate + * here. + */ + if (!xfs_has_reflink(ip->i_mount)) { + xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); + return -EFSCORRUPTED; + } + + dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK); + numrecs = be16_to_cpu(dfp->bb_numrecs); + level = be16_to_cpu(dfp->bb_level); + + if (level > mp->m_rtrefc_maxlevels || + xfs_rtrefcount_droot_space_calc(level, numrecs) > dsize) { + xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); + return -EFSCORRUPTED; + } + + broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK), + xfs_rtrefcount_broot_space_calc(mp, level, numrecs)); + if (broot) + xfs_rtrefcountbt_from_disk(ip, dfp, dsize, broot); + return 0; +} + +/* + * Convert in-memory form of btree root to on-disk form. 
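+ * (Illustrative note, not part of the patch: this is the inverse of
+ * xfs_rtrefcountbt_from_disk() above; node-level roots copy keys and
+ * child pointers separately because the on-disk "droot" packs its
+ * pointer area immediately after maxrecs keys, with maxrecs recomputed
+ * from dblocklen via xfs_rtrefcountbt_droot_maxrecs().)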
+ */ +void +xfs_rtrefcountbt_to_disk( + struct xfs_mount *mp, + struct xfs_btree_block *rblock, + int rblocklen, + struct xfs_rtrefcount_root *dblock, + int dblocklen) +{ + struct xfs_refcount_key *fkp; + __be64 *fpp; + struct xfs_refcount_key *tkp; + __be64 *tpp; + struct xfs_refcount_rec *frp; + struct xfs_refcount_rec *trp; + unsigned int maxrecs; + unsigned int numrecs; + + ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTREFC_CRC_MAGIC)); + ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)); + ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL)); + ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)); + ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)); + + dblock->bb_level = rblock->bb_level; + dblock->bb_numrecs = rblock->bb_numrecs; + + if (be16_to_cpu(rblock->bb_level) > 0) { + maxrecs = xfs_rtrefcountbt_droot_maxrecs(dblocklen, false); + fkp = xfs_rtrefcount_key_addr(rblock, 1); + tkp = xfs_rtrefcount_droot_key_addr(dblock, 1); + fpp = xfs_rtrefcount_broot_ptr_addr(mp, rblock, 1, rblocklen); + tpp = xfs_rtrefcount_droot_ptr_addr(dblock, 1, maxrecs); + numrecs = be16_to_cpu(rblock->bb_numrecs); + memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs); + memcpy(tpp, fpp, sizeof(*fpp) * numrecs); + } else { + frp = xfs_rtrefcount_rec_addr(rblock, 1); + trp = xfs_rtrefcount_droot_rec_addr(dblock, 1); + numrecs = be16_to_cpu(rblock->bb_numrecs); + memcpy(trp, frp, sizeof(*frp) * numrecs); + } +} + +/* Flush a realtime reference count btree root out to disk. */ +void +xfs_iflush_rtrefcount( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + struct xfs_rtrefcount_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + + ASSERT(ifp->if_broot != NULL); + ASSERT(ifp->if_broot_bytes > 0); + ASSERT(xfs_rtrefcount_droot_space(ifp->if_broot) <= + xfs_inode_fork_size(ip, XFS_DATA_FORK)); + xfs_rtrefcountbt_to_disk(ip->i_mount, ifp->if_broot, + ifp->if_broot_bytes, dfp, + XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK)); +} + +/* + * Create a realtime refcount btree inode. + */ +int +xfs_rtrefcountbt_create( + struct xfs_rtgroup *rtg, + struct xfs_inode *ip, + struct xfs_trans *tp, + bool init) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + struct xfs_mount *mp = ip->i_mount; + struct xfs_btree_block *broot; + + ifp->if_format = XFS_DINODE_FMT_META_BTREE; + ASSERT(ifp->if_broot_bytes == 0); + ASSERT(ifp->if_bytes == 0); + + /* Initialize the empty incore btree root. */ + broot = xfs_broot_realloc(ifp, + xfs_rtrefcount_broot_space_calc(mp, 0, 0)); + if (broot) + xfs_btree_init_block(mp, broot, &xfs_rtrefcountbt_ops, 0, 0, + ip->i_ino); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT); + return 0; +} diff --git a/fs/xfs/libxfs/xfs_rtrefcount_btree.h b/fs/xfs/libxfs/xfs_rtrefcount_btree.h new file mode 100644 index 000000000000..a99b7a8aec86 --- /dev/null +++ b/fs/xfs/libxfs/xfs_rtrefcount_btree.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021-2024 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong <djwong@kernel.org> + */ +#ifndef __XFS_RTREFCOUNT_BTREE_H__ +#define __XFS_RTREFCOUNT_BTREE_H__ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_mount; +struct xbtree_ifakeroot; +struct xfs_rtgroup; + +/* refcounts only exist on crc enabled filesystems */ +#define XFS_RTREFCOUNT_BLOCK_LEN XFS_BTREE_LBLOCK_CRC_LEN + +struct xfs_btree_cur *xfs_rtrefcountbt_init_cursor(struct xfs_trans *tp, + struct xfs_rtgroup *rtg); +struct xfs_btree_cur *xfs_rtrefcountbt_stage_cursor(struct xfs_mount *mp, + struct xfs_rtgroup *rtg, struct xfs_inode *ip, + struct xbtree_ifakeroot *ifake); +void xfs_rtrefcountbt_commit_staged_btree(struct xfs_btree_cur *cur, + struct xfs_trans *tp); +unsigned int xfs_rtrefcountbt_maxrecs(struct xfs_mount *mp, + unsigned int blocklen, bool leaf); +void xfs_rtrefcountbt_compute_maxlevels(struct xfs_mount *mp); +unsigned int xfs_rtrefcountbt_droot_maxrecs(unsigned int blocklen, bool leaf); + +/* + * Addresses of records, keys, and pointers within an incore rtrefcountbt block. + * + * (note that some of these may appear unused, but they are used in userspace) + */ +static inline struct xfs_refcount_rec * +xfs_rtrefcount_rec_addr( + struct xfs_btree_block *block, + unsigned int index) +{ + return (struct xfs_refcount_rec *) + ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN + + (index - 1) * sizeof(struct xfs_refcount_rec)); +} + +static inline struct xfs_refcount_key * +xfs_rtrefcount_key_addr( + struct xfs_btree_block *block, + unsigned int index) +{ + return (struct xfs_refcount_key *) + ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN + + (index - 1) * sizeof(struct xfs_refcount_key)); +} + +static inline xfs_rtrefcount_ptr_t * +xfs_rtrefcount_ptr_addr( + struct xfs_btree_block *block, + unsigned int index, + unsigned int maxrecs) +{ + return (xfs_rtrefcount_ptr_t *) + ((char *)block + XFS_RTREFCOUNT_BLOCK_LEN + + maxrecs * sizeof(struct xfs_refcount_key) + + (index - 1) * sizeof(xfs_rtrefcount_ptr_t)); +} + +unsigned int xfs_rtrefcountbt_maxlevels_ondisk(void); +int __init xfs_rtrefcountbt_init_cur_cache(void); +void xfs_rtrefcountbt_destroy_cur_cache(void); + +xfs_filblks_t xfs_rtrefcountbt_calc_reserves(struct xfs_mount *mp); +unsigned long long xfs_rtrefcountbt_calc_size(struct xfs_mount *mp, + unsigned long long len); + +/* Addresses of key, pointers, and records within an ondisk rtrefcount block. */ + +static inline struct xfs_refcount_rec * +xfs_rtrefcount_droot_rec_addr( + struct xfs_rtrefcount_root *block, + unsigned int index) +{ + return (struct xfs_refcount_rec *) + ((char *)(block + 1) + + (index - 1) * sizeof(struct xfs_refcount_rec)); +} + +static inline struct xfs_refcount_key * +xfs_rtrefcount_droot_key_addr( + struct xfs_rtrefcount_root *block, + unsigned int index) +{ + return (struct xfs_refcount_key *) + ((char *)(block + 1) + + (index - 1) * sizeof(struct xfs_refcount_key)); +} + +static inline xfs_rtrefcount_ptr_t * +xfs_rtrefcount_droot_ptr_addr( + struct xfs_rtrefcount_root *block, + unsigned int index, + unsigned int maxrecs) +{ + return (xfs_rtrefcount_ptr_t *) + ((char *)(block + 1) + + maxrecs * sizeof(struct xfs_refcount_key) + + (index - 1) * sizeof(xfs_rtrefcount_ptr_t)); +} + +/* + * Address of pointers within the incore btree root. + * + * These are to be used when we know the size of the block and + * we don't have a cursor. 
+ */ +static inline xfs_rtrefcount_ptr_t * +xfs_rtrefcount_broot_ptr_addr( + struct xfs_mount *mp, + struct xfs_btree_block *bb, + unsigned int index, + unsigned int block_size) +{ + return xfs_rtrefcount_ptr_addr(bb, index, + xfs_rtrefcountbt_maxrecs(mp, block_size, false)); +} + +/* + * Compute the space required for the incore btree root containing the given + * number of records. + */ +static inline size_t +xfs_rtrefcount_broot_space_calc( + struct xfs_mount *mp, + unsigned int level, + unsigned int nrecs) +{ + size_t sz = XFS_RTREFCOUNT_BLOCK_LEN; + + if (level > 0) + return sz + nrecs * (sizeof(struct xfs_refcount_key) + + sizeof(xfs_rtrefcount_ptr_t)); + return sz + nrecs * sizeof(struct xfs_refcount_rec); +} + +/* + * Compute the space required for the incore btree root given the ondisk + * btree root block. + */ +static inline size_t +xfs_rtrefcount_broot_space(struct xfs_mount *mp, struct xfs_rtrefcount_root *bb) +{ + return xfs_rtrefcount_broot_space_calc(mp, be16_to_cpu(bb->bb_level), + be16_to_cpu(bb->bb_numrecs)); +} + +/* Compute the space required for the ondisk root block. */ +static inline size_t +xfs_rtrefcount_droot_space_calc( + unsigned int level, + unsigned int nrecs) +{ + size_t sz = sizeof(struct xfs_rtrefcount_root); + + if (level > 0) + return sz + nrecs * (sizeof(struct xfs_refcount_key) + + sizeof(xfs_rtrefcount_ptr_t)); + return sz + nrecs * sizeof(struct xfs_refcount_rec); +} + +/* + * Compute the space required for the ondisk root block given an incore root + * block. + */ +static inline size_t +xfs_rtrefcount_droot_space(struct xfs_btree_block *bb) +{ + return xfs_rtrefcount_droot_space_calc(be16_to_cpu(bb->bb_level), + be16_to_cpu(bb->bb_numrecs)); +} + +int xfs_iformat_rtrefcount(struct xfs_inode *ip, struct xfs_dinode *dip); +void xfs_rtrefcountbt_to_disk(struct xfs_mount *mp, + struct xfs_btree_block *rblock, int rblocklen, + struct xfs_rtrefcount_root *dblock, int dblocklen); +void xfs_iflush_rtrefcount(struct xfs_inode *ip, struct xfs_dinode *dip); + +int xfs_rtrefcountbt_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip, + struct xfs_trans *tp, bool init); + +#endif /* __XFS_RTREFCOUNT_BTREE_H__ */ diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.c b/fs/xfs/libxfs/xfs_rtrmap_btree.c new file mode 100644 index 000000000000..e4ec36943cb7 --- /dev/null +++ b/fs/xfs/libxfs/xfs_rtrmap_btree.c @@ -0,0 +1,1035 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2018-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_bit.h" +#include "xfs_sb.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_alloc.h" +#include "xfs_btree.h" +#include "xfs_btree_staging.h" +#include "xfs_metafile.h" +#include "xfs_rmap.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_trace.h" +#include "xfs_cksum.h" +#include "xfs_error.h" +#include "xfs_extent_busy.h" +#include "xfs_rtgroup.h" +#include "xfs_bmap.h" +#include "xfs_health.h" +#include "xfs_buf_mem.h" +#include "xfs_btree_mem.h" + +static struct kmem_cache *xfs_rtrmapbt_cur_cache; + +/* + * Realtime Reverse Map btree. + * + * This is a btree used to track the owner(s) of a given extent in the realtime + * device. See the comments in xfs_rmap_btree.c for more information. 
+ * + * This tree is basically the same as the regular rmap btree except that it + * is rooted in an inode and does not live in free space. + */ + +static struct xfs_btree_cur * +xfs_rtrmapbt_dup_cursor( + struct xfs_btree_cur *cur) +{ + return xfs_rtrmapbt_init_cursor(cur->bc_tp, to_rtg(cur->bc_group)); +} + +STATIC int +xfs_rtrmapbt_get_minrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + + return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, + level == 0) / 2; + } + + return cur->bc_mp->m_rtrmap_mnr[level != 0]; +} + +STATIC int +xfs_rtrmapbt_get_maxrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level == cur->bc_nlevels - 1) { + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + + return xfs_rtrmapbt_maxrecs(cur->bc_mp, ifp->if_broot_bytes, + level == 0); + } + + return cur->bc_mp->m_rtrmap_mxr[level != 0]; +} + +/* Calculate number of records in the ondisk realtime rmap btree inode root. */ +unsigned int +xfs_rtrmapbt_droot_maxrecs( + unsigned int blocklen, + bool leaf) +{ + blocklen -= sizeof(struct xfs_rtrmap_root); + + if (leaf) + return blocklen / sizeof(struct xfs_rmap_rec); + return blocklen / (2 * sizeof(struct xfs_rmap_key) + + sizeof(xfs_rtrmap_ptr_t)); +} + +/* + * Get the maximum records we could store in the on-disk format. + * + * For non-root nodes this is equivalent to xfs_rtrmapbt_get_maxrecs, but + * for the root node this checks the available space in the dinode fork + * so that we can resize the in-memory buffer to match it. After a + * resize to the maximum size this function returns the same value + * as xfs_rtrmapbt_get_maxrecs for the root node, too. + */ +STATIC int +xfs_rtrmapbt_get_dmaxrecs( + struct xfs_btree_cur *cur, + int level) +{ + if (level != cur->bc_nlevels - 1) + return cur->bc_mp->m_rtrmap_mxr[level != 0]; + return xfs_rtrmapbt_droot_maxrecs(cur->bc_ino.forksize, level == 0); +} + +/* + * Convert the ondisk record's offset field into the ondisk key's offset field. + * Fork and bmbt are significant parts of the rmap record key, but written + * status is merely a record attribute. 
+ */ +static inline __be64 ondisk_rec_offset_to_key(const union xfs_btree_rec *rec) +{ + return rec->rmap.rm_offset & ~cpu_to_be64(XFS_RMAP_OFF_UNWRITTEN); +} + +STATIC void +xfs_rtrmapbt_init_key_from_rec( + union xfs_btree_key *key, + const union xfs_btree_rec *rec) +{ + key->rmap.rm_startblock = rec->rmap.rm_startblock; + key->rmap.rm_owner = rec->rmap.rm_owner; + key->rmap.rm_offset = ondisk_rec_offset_to_key(rec); +} + +STATIC void +xfs_rtrmapbt_init_high_key_from_rec( + union xfs_btree_key *key, + const union xfs_btree_rec *rec) +{ + uint64_t off; + int adj; + + adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1; + + key->rmap.rm_startblock = rec->rmap.rm_startblock; + be32_add_cpu(&key->rmap.rm_startblock, adj); + key->rmap.rm_owner = rec->rmap.rm_owner; + key->rmap.rm_offset = ondisk_rec_offset_to_key(rec); + if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) || + XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset))) + return; + off = be64_to_cpu(key->rmap.rm_offset); + off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK); + key->rmap.rm_offset = cpu_to_be64(off); +} + +STATIC void +xfs_rtrmapbt_init_rec_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_rec *rec) +{ + rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock); + rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount); + rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner); + rec->rmap.rm_offset = cpu_to_be64( + xfs_rmap_irec_offset_pack(&cur->bc_rec.r)); +} + +STATIC void +xfs_rtrmapbt_init_ptr_from_cur( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr) +{ + ptr->l = 0; +} + +/* + * Mask the appropriate parts of the ondisk key field for a key comparison. + * Fork and bmbt are significant parts of the rmap record key, but written + * status is merely a record attribute. 
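Because this is an overlapping btree (two keys per pointer), xfs_rtrmapbt_init_high_key_from_rec() above computes the highest block and file offset a record covers. A standalone sketch of that adjustment with made-up values (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* hypothetical rmap record: 8 rtblocks starting at block 100, file offset 50 */
        uint32_t rm_startblock = 100, rm_blockcount = 8;
        uint64_t rm_offset = 50;
        uint32_t adj = rm_blockcount - 1;

        /* the low key is the record itself; the high key is its last covered block */
        printf("low key:  block %u offset %llu\n", rm_startblock,
                        (unsigned long long)rm_offset);
        printf("high key: block %u offset %llu\n", rm_startblock + adj,
                        (unsigned long long)(rm_offset + adj));
        return 0;
}

The real helper skips the offset adjustment for non-inode owners and bmbt blocks, and both key initializers mask off the unwritten flag, since written state is a record attribute rather than part of the lookup key.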
+ */ +static inline uint64_t offset_keymask(uint64_t offset) +{ + return offset & ~XFS_RMAP_OFF_UNWRITTEN; +} + +STATIC int64_t +xfs_rtrmapbt_key_diff( + struct xfs_btree_cur *cur, + const union xfs_btree_key *key) +{ + struct xfs_rmap_irec *rec = &cur->bc_rec.r; + const struct xfs_rmap_key *kp = &key->rmap; + __u64 x, y; + int64_t d; + + d = (int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock; + if (d) + return d; + + x = be64_to_cpu(kp->rm_owner); + y = rec->rm_owner; + if (x > y) + return 1; + else if (y > x) + return -1; + + x = offset_keymask(be64_to_cpu(kp->rm_offset)); + y = offset_keymask(xfs_rmap_irec_offset_pack(rec)); + if (x > y) + return 1; + else if (y > x) + return -1; + return 0; +} + +STATIC int64_t +xfs_rtrmapbt_diff_two_keys( + struct xfs_btree_cur *cur, + const union xfs_btree_key *k1, + const union xfs_btree_key *k2, + const union xfs_btree_key *mask) +{ + const struct xfs_rmap_key *kp1 = &k1->rmap; + const struct xfs_rmap_key *kp2 = &k2->rmap; + int64_t d; + __u64 x, y; + + /* Doesn't make sense to mask off the physical space part */ + ASSERT(!mask || mask->rmap.rm_startblock); + + d = (int64_t)be32_to_cpu(kp1->rm_startblock) - + be32_to_cpu(kp2->rm_startblock); + if (d) + return d; + + if (!mask || mask->rmap.rm_owner) { + x = be64_to_cpu(kp1->rm_owner); + y = be64_to_cpu(kp2->rm_owner); + if (x > y) + return 1; + else if (y > x) + return -1; + } + + if (!mask || mask->rmap.rm_offset) { + /* Doesn't make sense to allow offset but not owner */ + ASSERT(!mask || mask->rmap.rm_owner); + + x = offset_keymask(be64_to_cpu(kp1->rm_offset)); + y = offset_keymask(be64_to_cpu(kp2->rm_offset)); + if (x > y) + return 1; + else if (y > x) + return -1; + } + + return 0; +} + +static xfs_failaddr_t +xfs_rtrmapbt_verify( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_target->bt_mount; + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + xfs_failaddr_t fa; + int level; + + if (!xfs_verify_magic(bp, block->bb_magic)) + return __this_address; + + if (!xfs_has_rmapbt(mp)) + return __this_address; + fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN); + if (fa) + return fa; + level = be16_to_cpu(block->bb_level); + if (level > mp->m_rtrmap_maxlevels) + return __this_address; + + return xfs_btree_fsblock_verify(bp, mp->m_rtrmap_mxr[level != 0]); +} + +static void +xfs_rtrmapbt_read_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa; + + if (!xfs_btree_fsblock_verify_crc(bp)) + xfs_verifier_error(bp, -EFSBADCRC, __this_address); + else { + fa = xfs_rtrmapbt_verify(bp); + if (fa) + xfs_verifier_error(bp, -EFSCORRUPTED, fa); + } + + if (bp->b_error) + trace_xfs_btree_corrupt(bp, _RET_IP_); +} + +static void +xfs_rtrmapbt_write_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa; + + fa = xfs_rtrmapbt_verify(bp); + if (fa) { + trace_xfs_btree_corrupt(bp, _RET_IP_); + xfs_verifier_error(bp, -EFSCORRUPTED, fa); + return; + } + xfs_btree_fsblock_calc_crc(bp); + +} + +const struct xfs_buf_ops xfs_rtrmapbt_buf_ops = { + .name = "xfs_rtrmapbt", + .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) }, + .verify_read = xfs_rtrmapbt_read_verify, + .verify_write = xfs_rtrmapbt_write_verify, + .verify_struct = xfs_rtrmapbt_verify, +}; + +STATIC int +xfs_rtrmapbt_keys_inorder( + struct xfs_btree_cur *cur, + const union xfs_btree_key *k1, + const union xfs_btree_key *k2) +{ + uint32_t x; + uint32_t y; + uint64_t a; + uint64_t b; + + x = be32_to_cpu(k1->rmap.rm_startblock); + y = be32_to_cpu(k2->rmap.rm_startblock); + if (x < y) + return 1; + else if (x > y) + return 0; + a 
= be64_to_cpu(k1->rmap.rm_owner); + b = be64_to_cpu(k2->rmap.rm_owner); + if (a < b) + return 1; + else if (a > b) + return 0; + a = offset_keymask(be64_to_cpu(k1->rmap.rm_offset)); + b = offset_keymask(be64_to_cpu(k2->rmap.rm_offset)); + if (a <= b) + return 1; + return 0; +} + +STATIC int +xfs_rtrmapbt_recs_inorder( + struct xfs_btree_cur *cur, + const union xfs_btree_rec *r1, + const union xfs_btree_rec *r2) +{ + uint32_t x; + uint32_t y; + uint64_t a; + uint64_t b; + + x = be32_to_cpu(r1->rmap.rm_startblock); + y = be32_to_cpu(r2->rmap.rm_startblock); + if (x < y) + return 1; + else if (x > y) + return 0; + a = be64_to_cpu(r1->rmap.rm_owner); + b = be64_to_cpu(r2->rmap.rm_owner); + if (a < b) + return 1; + else if (a > b) + return 0; + a = offset_keymask(be64_to_cpu(r1->rmap.rm_offset)); + b = offset_keymask(be64_to_cpu(r2->rmap.rm_offset)); + if (a <= b) + return 1; + return 0; +} + +STATIC enum xbtree_key_contig +xfs_rtrmapbt_keys_contiguous( + struct xfs_btree_cur *cur, + const union xfs_btree_key *key1, + const union xfs_btree_key *key2, + const union xfs_btree_key *mask) +{ + ASSERT(!mask || mask->rmap.rm_startblock); + + /* + * We only support checking contiguity of the physical space component. + * If any callers ever need more specificity than that, they'll have to + * implement it here. + */ + ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset)); + + return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock), + be32_to_cpu(key2->rmap.rm_startblock)); +} + +static inline void +xfs_rtrmapbt_move_ptrs( + struct xfs_mount *mp, + struct xfs_btree_block *broot, + short old_size, + size_t new_size, + unsigned int numrecs) +{ + void *dptr; + void *sptr; + + sptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, old_size); + dptr = xfs_rtrmap_broot_ptr_addr(mp, broot, 1, new_size); + memmove(dptr, sptr, numrecs * sizeof(xfs_rtrmap_ptr_t)); +} + +static struct xfs_btree_block * +xfs_rtrmapbt_broot_realloc( + struct xfs_btree_cur *cur, + unsigned int new_numrecs) +{ + struct xfs_mount *mp = cur->bc_mp; + struct xfs_ifork *ifp = xfs_btree_ifork_ptr(cur); + struct xfs_btree_block *broot; + unsigned int new_size; + unsigned int old_size = ifp->if_broot_bytes; + const unsigned int level = cur->bc_nlevels - 1; + + new_size = xfs_rtrmap_broot_space_calc(mp, level, new_numrecs); + + /* Handle the nop case quietly. */ + if (new_size == old_size) + return ifp->if_broot; + + if (new_size > old_size) { + unsigned int old_numrecs; + + /* + * If there wasn't any memory allocated before, just allocate + * it now and get out. + */ + if (old_size == 0) + return xfs_broot_realloc(ifp, new_size); + + /* + * If there is already an existing if_broot, then we need to + * realloc it and possibly move the node block pointers because + * those are not butted up against the btree block header. + */ + old_numrecs = xfs_rtrmapbt_maxrecs(mp, old_size, level == 0); + broot = xfs_broot_realloc(ifp, new_size); + if (level > 0) + xfs_rtrmapbt_move_ptrs(mp, broot, old_size, new_size, + old_numrecs); + goto out_broot; + } + + /* + * We're reducing numrecs. If we're going all the way to zero, just + * free the block. + */ + ASSERT(ifp->if_broot != NULL && old_size > 0); + if (new_size == 0) + return xfs_broot_realloc(ifp, 0); + + /* + * Shrink the btree root by possibly moving the rtrmapbt pointers, + * since they are not butted up against the btree block header. Then + * reallocate broot. 
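A quick worked example of why the pointer move above is needed (figures illustrative, sizes symbolic): the incore root buffer is sized for exactly numrecs entries, so in a node root the pointer array starts at XFS_RTRMAP_BLOCK_LEN + numrecs * 2 * sizeof(struct xfs_rmap_key). Growing a node root from 2 to 3 entries shifts that boundary up by one key pair, so the 2 existing pointers are memmove()d to the new offset once the buffer has grown; shrinking does the move first, before the buffer is reallocated to the smaller size.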
+ */ + if (level > 0) + xfs_rtrmapbt_move_ptrs(mp, ifp->if_broot, old_size, new_size, + new_numrecs); + broot = xfs_broot_realloc(ifp, new_size); + +out_broot: + ASSERT(xfs_rtrmap_droot_space(broot) <= + xfs_inode_fork_size(cur->bc_ino.ip, cur->bc_ino.whichfork)); + return broot; +} + +const struct xfs_btree_ops xfs_rtrmapbt_ops = { + .name = "rtrmap", + .type = XFS_BTREE_TYPE_INODE, + .geom_flags = XFS_BTGEO_OVERLAPPING | + XFS_BTGEO_IROOT_RECORDS, + + .rec_len = sizeof(struct xfs_rmap_rec), + /* Overlapping btree; 2 keys per pointer. */ + .key_len = 2 * sizeof(struct xfs_rmap_key), + .ptr_len = XFS_BTREE_LONG_PTR_LEN, + + .lru_refs = XFS_RMAP_BTREE_REF, + .statoff = XFS_STATS_CALC_INDEX(xs_rtrmap_2), + .sick_mask = XFS_SICK_RG_RMAPBT, + + .dup_cursor = xfs_rtrmapbt_dup_cursor, + .alloc_block = xfs_btree_alloc_metafile_block, + .free_block = xfs_btree_free_metafile_block, + .get_minrecs = xfs_rtrmapbt_get_minrecs, + .get_maxrecs = xfs_rtrmapbt_get_maxrecs, + .get_dmaxrecs = xfs_rtrmapbt_get_dmaxrecs, + .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec, + .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec, + .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur, + .init_ptr_from_cur = xfs_rtrmapbt_init_ptr_from_cur, + .key_diff = xfs_rtrmapbt_key_diff, + .buf_ops = &xfs_rtrmapbt_buf_ops, + .diff_two_keys = xfs_rtrmapbt_diff_two_keys, + .keys_inorder = xfs_rtrmapbt_keys_inorder, + .recs_inorder = xfs_rtrmapbt_recs_inorder, + .keys_contiguous = xfs_rtrmapbt_keys_contiguous, + .broot_realloc = xfs_rtrmapbt_broot_realloc, +}; + +/* Allocate a new rt rmap btree cursor. */ +struct xfs_btree_cur * +xfs_rtrmapbt_init_cursor( + struct xfs_trans *tp, + struct xfs_rtgroup *rtg) +{ + struct xfs_inode *ip = rtg_rmap(rtg); + struct xfs_mount *mp = rtg_mount(rtg); + struct xfs_btree_cur *cur; + + xfs_assert_ilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL); + + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_ops, + mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache); + + cur->bc_ino.ip = ip; + cur->bc_group = xfs_group_hold(rtg_group(rtg)); + cur->bc_ino.whichfork = XFS_DATA_FORK; + cur->bc_nlevels = be16_to_cpu(ip->i_df.if_broot->bb_level) + 1; + cur->bc_ino.forksize = xfs_inode_fork_size(ip, XFS_DATA_FORK); + + return cur; +} + +#ifdef CONFIG_XFS_BTREE_IN_MEM +/* + * Validate an in-memory realtime rmap btree block. Callers are allowed to + * generate an in-memory btree even if the ondisk feature is not enabled. 
+ */ +static xfs_failaddr_t +xfs_rtrmapbt_mem_verify( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_mount; + struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp); + xfs_failaddr_t fa; + unsigned int level; + unsigned int maxrecs; + + if (!xfs_verify_magic(bp, block->bb_magic)) + return __this_address; + + fa = xfs_btree_fsblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN); + if (fa) + return fa; + + level = be16_to_cpu(block->bb_level); + if (xfs_has_rmapbt(mp)) { + if (level >= mp->m_rtrmap_maxlevels) + return __this_address; + } else { + if (level >= xfs_rtrmapbt_maxlevels_ondisk()) + return __this_address; + } + + maxrecs = xfs_rtrmapbt_maxrecs(mp, XFBNO_BLOCKSIZE, level == 0); + return xfs_btree_memblock_verify(bp, maxrecs); +} + +static void +xfs_rtrmapbt_mem_rw_verify( + struct xfs_buf *bp) +{ + xfs_failaddr_t fa = xfs_rtrmapbt_mem_verify(bp); + + if (fa) + xfs_verifier_error(bp, -EFSCORRUPTED, fa); +} + +/* skip crc checks on in-memory btrees to save time */ +static const struct xfs_buf_ops xfs_rtrmapbt_mem_buf_ops = { + .name = "xfs_rtrmapbt_mem", + .magic = { 0, cpu_to_be32(XFS_RTRMAP_CRC_MAGIC) }, + .verify_read = xfs_rtrmapbt_mem_rw_verify, + .verify_write = xfs_rtrmapbt_mem_rw_verify, + .verify_struct = xfs_rtrmapbt_mem_verify, +}; + +const struct xfs_btree_ops xfs_rtrmapbt_mem_ops = { + .type = XFS_BTREE_TYPE_MEM, + .geom_flags = XFS_BTGEO_OVERLAPPING, + + .rec_len = sizeof(struct xfs_rmap_rec), + /* Overlapping btree; 2 keys per pointer. */ + .key_len = 2 * sizeof(struct xfs_rmap_key), + .ptr_len = XFS_BTREE_LONG_PTR_LEN, + + .lru_refs = XFS_RMAP_BTREE_REF, + .statoff = XFS_STATS_CALC_INDEX(xs_rtrmap_mem_2), + + .dup_cursor = xfbtree_dup_cursor, + .set_root = xfbtree_set_root, + .alloc_block = xfbtree_alloc_block, + .free_block = xfbtree_free_block, + .get_minrecs = xfbtree_get_minrecs, + .get_maxrecs = xfbtree_get_maxrecs, + .init_key_from_rec = xfs_rtrmapbt_init_key_from_rec, + .init_high_key_from_rec = xfs_rtrmapbt_init_high_key_from_rec, + .init_rec_from_cur = xfs_rtrmapbt_init_rec_from_cur, + .init_ptr_from_cur = xfbtree_init_ptr_from_cur, + .key_diff = xfs_rtrmapbt_key_diff, + .buf_ops = &xfs_rtrmapbt_mem_buf_ops, + .diff_two_keys = xfs_rtrmapbt_diff_two_keys, + .keys_inorder = xfs_rtrmapbt_keys_inorder, + .recs_inorder = xfs_rtrmapbt_recs_inorder, + .keys_contiguous = xfs_rtrmapbt_keys_contiguous, +}; + +/* Create a cursor for an in-memory btree. */ +struct xfs_btree_cur * +xfs_rtrmapbt_mem_cursor( + struct xfs_rtgroup *rtg, + struct xfs_trans *tp, + struct xfbtree *xfbt) +{ + struct xfs_mount *mp = rtg_mount(rtg); + struct xfs_btree_cur *cur; + + cur = xfs_btree_alloc_cursor(mp, tp, &xfs_rtrmapbt_mem_ops, + mp->m_rtrmap_maxlevels, xfs_rtrmapbt_cur_cache); + cur->bc_mem.xfbtree = xfbt; + cur->bc_nlevels = xfbt->nlevels; + cur->bc_group = xfs_group_hold(rtg_group(rtg)); + return cur; +} + +/* Create an in-memory realtime rmap btree. */ +int +xfs_rtrmapbt_mem_init( + struct xfs_mount *mp, + struct xfbtree *xfbt, + struct xfs_buftarg *btp, + xfs_rgnumber_t rgno) +{ + xfbt->owner = rgno; + return xfbtree_init(mp, xfbt, btp, &xfs_rtrmapbt_mem_ops); +} +#endif /* CONFIG_XFS_BTREE_IN_MEM */ + +/* + * Install a new rt reverse mapping btree root. Caller is responsible for + * invalidating and freeing the old btree blocks. 
+ */ +void +xfs_rtrmapbt_commit_staged_btree( + struct xfs_btree_cur *cur, + struct xfs_trans *tp) +{ + struct xbtree_ifakeroot *ifake = cur->bc_ino.ifake; + struct xfs_ifork *ifp; + int flags = XFS_ILOG_CORE | XFS_ILOG_DBROOT; + + ASSERT(cur->bc_flags & XFS_BTREE_STAGING); + ASSERT(ifake->if_fork->if_format == XFS_DINODE_FMT_META_BTREE); + + /* + * Free any resources hanging off the real fork, then shallow-copy the + * staging fork's contents into the real fork to transfer everything + * we just built. + */ + ifp = xfs_ifork_ptr(cur->bc_ino.ip, XFS_DATA_FORK); + xfs_idestroy_fork(ifp); + memcpy(ifp, ifake->if_fork, sizeof(struct xfs_ifork)); + + cur->bc_ino.ip->i_projid = cur->bc_group->xg_gno; + xfs_trans_log_inode(tp, cur->bc_ino.ip, flags); + xfs_btree_commit_ifakeroot(cur, tp, XFS_DATA_FORK); +} + +/* Calculate number of records in a rt reverse mapping btree block. */ +static inline unsigned int +xfs_rtrmapbt_block_maxrecs( + unsigned int blocklen, + bool leaf) +{ + if (leaf) + return blocklen / sizeof(struct xfs_rmap_rec); + return blocklen / + (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rtrmap_ptr_t)); +} + +/* + * Calculate number of records in an rt reverse mapping btree block. + */ +unsigned int +xfs_rtrmapbt_maxrecs( + struct xfs_mount *mp, + unsigned int blocklen, + bool leaf) +{ + blocklen -= XFS_RTRMAP_BLOCK_LEN; + return xfs_rtrmapbt_block_maxrecs(blocklen, leaf); +} + +/* Compute the max possible height for realtime reverse mapping btrees. */ +unsigned int +xfs_rtrmapbt_maxlevels_ondisk(void) +{ + unsigned long long max_dblocks; + unsigned int minrecs[2]; + unsigned int blocklen; + + blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_LBLOCK_CRC_LEN; + + minrecs[0] = xfs_rtrmapbt_block_maxrecs(blocklen, true) / 2; + minrecs[1] = xfs_rtrmapbt_block_maxrecs(blocklen, false) / 2; + + /* + * Compute the asymptotic maxlevels for an rtrmapbt on any rtreflink fs. + * + * On a reflink filesystem, each block in an rtgroup can have up to + * 2^32 (per the refcount record format) owners, which means that + * theoretically we could face up to 2^64 rmap records. However, we're + * likely to run out of blocks in the data device long before that + * happens, which means that we must compute the max height based on + * what the btree will look like if it consumes almost all the blocks + * in the data device due to maximal sharing factor. + */ + max_dblocks = -1U; /* max ag count */ + max_dblocks *= XFS_MAX_CRC_AG_BLOCKS; + return xfs_btree_space_to_height(minrecs, max_dblocks); +} + +int __init +xfs_rtrmapbt_init_cur_cache(void) +{ + xfs_rtrmapbt_cur_cache = kmem_cache_create("xfs_rtrmapbt_cur", + xfs_btree_cur_sizeof(xfs_rtrmapbt_maxlevels_ondisk()), + 0, 0, NULL); + + if (!xfs_rtrmapbt_cur_cache) + return -ENOMEM; + return 0; +} + +void +xfs_rtrmapbt_destroy_cur_cache(void) +{ + kmem_cache_destroy(xfs_rtrmapbt_cur_cache); + xfs_rtrmapbt_cur_cache = NULL; +} + +/* Compute the maximum height of an rt reverse mapping btree. */ +void +xfs_rtrmapbt_compute_maxlevels( + struct xfs_mount *mp) +{ + unsigned int d_maxlevels, r_maxlevels; + + if (!xfs_has_rtrmapbt(mp)) { + mp->m_rtrmap_maxlevels = 0; + return; + } + + /* + * The realtime rmapbt lives on the data device, which means that its + * maximum height is constrained by the size of the data device and + * the height required to store one rmap record for each block in an + * rt group. 
+ * + * On a reflink filesystem, each rt block can have up to 2^32 (per the + * refcount record format) owners, which means that theoretically we + * could face up to 2^64 rmap records. This makes the computation of + * maxlevels based on record count meaningless, so we only consider the + * size of the data device. + */ + d_maxlevels = xfs_btree_space_to_height(mp->m_rtrmap_mnr, + mp->m_sb.sb_dblocks); + if (xfs_has_rtreflink(mp)) { + mp->m_rtrmap_maxlevels = d_maxlevels + 1; + return; + } + + r_maxlevels = xfs_btree_compute_maxlevels(mp->m_rtrmap_mnr, + mp->m_groups[XG_TYPE_RTG].blocks); + + /* Add one level to handle the inode root level. */ + mp->m_rtrmap_maxlevels = min(d_maxlevels, r_maxlevels) + 1; +} + +/* Calculate the rtrmap btree size for some records. */ +unsigned long long +xfs_rtrmapbt_calc_size( + struct xfs_mount *mp, + unsigned long long len) +{ + return xfs_btree_calc_size(mp->m_rtrmap_mnr, len); +} + +/* + * Calculate the maximum rmap btree size. + */ +static unsigned long long +xfs_rtrmapbt_max_size( + struct xfs_mount *mp, + xfs_rtblock_t rtblocks) +{ + /* Bail out if we're uninitialized, which can happen in mkfs. */ + if (mp->m_rtrmap_mxr[0] == 0) + return 0; + + return xfs_rtrmapbt_calc_size(mp, rtblocks); +} + +/* + * Figure out how many blocks to reserve and how many are used by this btree. + */ +xfs_filblks_t +xfs_rtrmapbt_calc_reserves( + struct xfs_mount *mp) +{ + uint32_t blocks = mp->m_groups[XG_TYPE_RTG].blocks; + + if (!xfs_has_rtrmapbt(mp)) + return 0; + + /* Reserve 1% of the rtgroup or enough for 1 block per record. */ + return max_t(xfs_filblks_t, blocks / 100, + xfs_rtrmapbt_max_size(mp, blocks)); +} + +/* Convert on-disk form of btree root to in-memory form. */ +STATIC void +xfs_rtrmapbt_from_disk( + struct xfs_inode *ip, + struct xfs_rtrmap_root *dblock, + unsigned int dblocklen, + struct xfs_btree_block *rblock) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_rmap_key *fkp; + __be64 *fpp; + struct xfs_rmap_key *tkp; + __be64 *tpp; + struct xfs_rmap_rec *frp; + struct xfs_rmap_rec *trp; + unsigned int rblocklen = xfs_rtrmap_broot_space(mp, dblock); + unsigned int numrecs; + unsigned int maxrecs; + + xfs_btree_init_block(mp, rblock, &xfs_rtrmapbt_ops, 0, 0, ip->i_ino); + + rblock->bb_level = dblock->bb_level; + rblock->bb_numrecs = dblock->bb_numrecs; + numrecs = be16_to_cpu(dblock->bb_numrecs); + + if (be16_to_cpu(rblock->bb_level) > 0) { + maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false); + fkp = xfs_rtrmap_droot_key_addr(dblock, 1); + tkp = xfs_rtrmap_key_addr(rblock, 1); + fpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs); + tpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen); + memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs); + memcpy(tpp, fpp, sizeof(*fpp) * numrecs); + } else { + frp = xfs_rtrmap_droot_rec_addr(dblock, 1); + trp = xfs_rtrmap_rec_addr(rblock, 1); + memcpy(trp, frp, sizeof(*frp) * numrecs); + } +} + +/* Load a realtime reverse mapping btree root in from disk. */ +int +xfs_iformat_rtrmap( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + struct xfs_btree_block *broot; + unsigned int numrecs; + unsigned int level; + int dsize; + + /* + * growfs must create the rtrmap inodes before adding a realtime volume + * to the filesystem, so we cannot use the rtrmapbt predicate here. 
+ */ + if (!xfs_has_rmapbt(ip->i_mount)) { + xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); + return -EFSCORRUPTED; + } + + dsize = XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK); + numrecs = be16_to_cpu(dfp->bb_numrecs); + level = be16_to_cpu(dfp->bb_level); + + if (level > mp->m_rtrmap_maxlevels || + xfs_rtrmap_droot_space_calc(level, numrecs) > dsize) { + xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); + return -EFSCORRUPTED; + } + + broot = xfs_broot_alloc(xfs_ifork_ptr(ip, XFS_DATA_FORK), + xfs_rtrmap_broot_space_calc(mp, level, numrecs)); + if (broot) + xfs_rtrmapbt_from_disk(ip, dfp, dsize, broot); + return 0; +} + +/* Convert in-memory form of btree root to on-disk form. */ +void +xfs_rtrmapbt_to_disk( + struct xfs_mount *mp, + struct xfs_btree_block *rblock, + unsigned int rblocklen, + struct xfs_rtrmap_root *dblock, + unsigned int dblocklen) +{ + struct xfs_rmap_key *fkp; + __be64 *fpp; + struct xfs_rmap_key *tkp; + __be64 *tpp; + struct xfs_rmap_rec *frp; + struct xfs_rmap_rec *trp; + unsigned int numrecs; + unsigned int maxrecs; + + ASSERT(rblock->bb_magic == cpu_to_be32(XFS_RTRMAP_CRC_MAGIC)); + ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid, &mp->m_sb.sb_meta_uuid)); + ASSERT(rblock->bb_u.l.bb_blkno == cpu_to_be64(XFS_BUF_DADDR_NULL)); + ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)); + ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)); + + dblock->bb_level = rblock->bb_level; + dblock->bb_numrecs = rblock->bb_numrecs; + numrecs = be16_to_cpu(rblock->bb_numrecs); + + if (be16_to_cpu(rblock->bb_level) > 0) { + maxrecs = xfs_rtrmapbt_droot_maxrecs(dblocklen, false); + fkp = xfs_rtrmap_key_addr(rblock, 1); + tkp = xfs_rtrmap_droot_key_addr(dblock, 1); + fpp = xfs_rtrmap_broot_ptr_addr(mp, rblock, 1, rblocklen); + tpp = xfs_rtrmap_droot_ptr_addr(dblock, 1, maxrecs); + memcpy(tkp, fkp, 2 * sizeof(*fkp) * numrecs); + memcpy(tpp, fpp, sizeof(*fpp) * numrecs); + } else { + frp = xfs_rtrmap_rec_addr(rblock, 1); + trp = xfs_rtrmap_droot_rec_addr(dblock, 1); + memcpy(trp, frp, sizeof(*frp) * numrecs); + } +} + +/* Flush a realtime reverse mapping btree root out to disk. */ +void +xfs_iflush_rtrmap( + struct xfs_inode *ip, + struct xfs_dinode *dip) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + struct xfs_rtrmap_root *dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + + ASSERT(ifp->if_broot != NULL); + ASSERT(ifp->if_broot_bytes > 0); + ASSERT(xfs_rtrmap_droot_space(ifp->if_broot) <= + xfs_inode_fork_size(ip, XFS_DATA_FORK)); + xfs_rtrmapbt_to_disk(ip->i_mount, ifp->if_broot, ifp->if_broot_bytes, + dfp, XFS_DFORK_SIZE(dip, ip->i_mount, XFS_DATA_FORK)); +} + +/* + * Create a realtime rmap btree inode. + */ +int +xfs_rtrmapbt_create( + struct xfs_rtgroup *rtg, + struct xfs_inode *ip, + struct xfs_trans *tp, + bool init) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + struct xfs_mount *mp = ip->i_mount; + struct xfs_btree_block *broot; + + ifp->if_format = XFS_DINODE_FMT_META_BTREE; + ASSERT(ifp->if_broot_bytes == 0); + ASSERT(ifp->if_bytes == 0); + + /* Initialize the empty incore btree root. */ + broot = xfs_broot_realloc(ifp, xfs_rtrmap_broot_space_calc(mp, 0, 0)); + if (broot) + xfs_btree_init_block(mp, broot, &xfs_rtrmapbt_ops, 0, 0, + ip->i_ino); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE | XFS_ILOG_DBROOT); + + return 0; +} + +/* + * Initialize an rmap for a realtime superblock using the potentially updated + * rt geometry in the provided @mp. 
+ */ +int +xfs_rtrmapbt_init_rtsb( + struct xfs_mount *mp, + struct xfs_rtgroup *rtg, + struct xfs_trans *tp) +{ + struct xfs_rmap_irec rmap = { + .rm_blockcount = mp->m_sb.sb_rextsize, + .rm_owner = XFS_RMAP_OWN_FS, + }; + struct xfs_btree_cur *cur; + int error; + + ASSERT(xfs_has_rtsb(mp)); + ASSERT(rtg_rgno(rtg) == 0); + + cur = xfs_rtrmapbt_init_cursor(tp, rtg); + error = xfs_rmap_map_raw(cur, &rmap); + xfs_btree_del_cursor(cur, error); + return error; +} diff --git a/fs/xfs/libxfs/xfs_rtrmap_btree.h b/fs/xfs/libxfs/xfs_rtrmap_btree.h new file mode 100644 index 000000000000..9d0915089891 --- /dev/null +++ b/fs/xfs/libxfs/xfs_rtrmap_btree.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2018-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#ifndef __XFS_RTRMAP_BTREE_H__ +#define __XFS_RTRMAP_BTREE_H__ + +struct xfs_buf; +struct xfs_btree_cur; +struct xfs_mount; +struct xbtree_ifakeroot; +struct xfs_rtgroup; +struct xfbtree; + +/* rmaps only exist on crc enabled filesystems */ +#define XFS_RTRMAP_BLOCK_LEN XFS_BTREE_LBLOCK_CRC_LEN + +struct xfs_btree_cur *xfs_rtrmapbt_init_cursor(struct xfs_trans *tp, + struct xfs_rtgroup *rtg); +struct xfs_btree_cur *xfs_rtrmapbt_stage_cursor(struct xfs_mount *mp, + struct xfs_rtgroup *rtg, struct xfs_inode *ip, + struct xbtree_ifakeroot *ifake); +void xfs_rtrmapbt_commit_staged_btree(struct xfs_btree_cur *cur, + struct xfs_trans *tp); +unsigned int xfs_rtrmapbt_maxrecs(struct xfs_mount *mp, unsigned int blocklen, + bool leaf); +void xfs_rtrmapbt_compute_maxlevels(struct xfs_mount *mp); +unsigned int xfs_rtrmapbt_droot_maxrecs(unsigned int blocklen, bool leaf); + +/* + * Addresses of records, keys, and pointers within an incore rtrmapbt block. + * + * (note that some of these may appear unused, but they are used in userspace) + */ +static inline struct xfs_rmap_rec * +xfs_rtrmap_rec_addr( + struct xfs_btree_block *block, + unsigned int index) +{ + return (struct xfs_rmap_rec *) + ((char *)block + XFS_RTRMAP_BLOCK_LEN + + (index - 1) * sizeof(struct xfs_rmap_rec)); +} + +static inline struct xfs_rmap_key * +xfs_rtrmap_key_addr( + struct xfs_btree_block *block, + unsigned int index) +{ + return (struct xfs_rmap_key *) + ((char *)block + XFS_RTRMAP_BLOCK_LEN + + (index - 1) * 2 * sizeof(struct xfs_rmap_key)); +} + +static inline struct xfs_rmap_key * +xfs_rtrmap_high_key_addr( + struct xfs_btree_block *block, + unsigned int index) +{ + return (struct xfs_rmap_key *) + ((char *)block + XFS_RTRMAP_BLOCK_LEN + + sizeof(struct xfs_rmap_key) + + (index - 1) * 2 * sizeof(struct xfs_rmap_key)); +} + +static inline xfs_rtrmap_ptr_t * +xfs_rtrmap_ptr_addr( + struct xfs_btree_block *block, + unsigned int index, + unsigned int maxrecs) +{ + return (xfs_rtrmap_ptr_t *) + ((char *)block + XFS_RTRMAP_BLOCK_LEN + + maxrecs * 2 * sizeof(struct xfs_rmap_key) + + (index - 1) * sizeof(xfs_rtrmap_ptr_t)); +} + +unsigned int xfs_rtrmapbt_maxlevels_ondisk(void); + +int __init xfs_rtrmapbt_init_cur_cache(void); +void xfs_rtrmapbt_destroy_cur_cache(void); + +xfs_filblks_t xfs_rtrmapbt_calc_reserves(struct xfs_mount *mp); + +/* Addresses of key, pointers, and records within an ondisk rtrmapbt block. 
*/ + +static inline struct xfs_rmap_rec * +xfs_rtrmap_droot_rec_addr( + struct xfs_rtrmap_root *block, + unsigned int index) +{ + return (struct xfs_rmap_rec *) + ((char *)(block + 1) + + (index - 1) * sizeof(struct xfs_rmap_rec)); +} + +static inline struct xfs_rmap_key * +xfs_rtrmap_droot_key_addr( + struct xfs_rtrmap_root *block, + unsigned int index) +{ + return (struct xfs_rmap_key *) + ((char *)(block + 1) + + (index - 1) * 2 * sizeof(struct xfs_rmap_key)); +} + +static inline xfs_rtrmap_ptr_t * +xfs_rtrmap_droot_ptr_addr( + struct xfs_rtrmap_root *block, + unsigned int index, + unsigned int maxrecs) +{ + return (xfs_rtrmap_ptr_t *) + ((char *)(block + 1) + + maxrecs * 2 * sizeof(struct xfs_rmap_key) + + (index - 1) * sizeof(xfs_rtrmap_ptr_t)); +} + +/* + * Address of pointers within the incore btree root. + * + * These are to be used when we know the size of the block and + * we don't have a cursor. + */ +static inline xfs_rtrmap_ptr_t * +xfs_rtrmap_broot_ptr_addr( + struct xfs_mount *mp, + struct xfs_btree_block *bb, + unsigned int index, + unsigned int block_size) +{ + return xfs_rtrmap_ptr_addr(bb, index, + xfs_rtrmapbt_maxrecs(mp, block_size, false)); +} + +/* + * Compute the space required for the incore btree root containing the given + * number of records. + */ +static inline size_t +xfs_rtrmap_broot_space_calc( + struct xfs_mount *mp, + unsigned int level, + unsigned int nrecs) +{ + size_t sz = XFS_RTRMAP_BLOCK_LEN; + + if (level > 0) + return sz + nrecs * (2 * sizeof(struct xfs_rmap_key) + + sizeof(xfs_rtrmap_ptr_t)); + return sz + nrecs * sizeof(struct xfs_rmap_rec); +} + +/* + * Compute the space required for the incore btree root given the ondisk + * btree root block. + */ +static inline size_t +xfs_rtrmap_broot_space(struct xfs_mount *mp, struct xfs_rtrmap_root *bb) +{ + return xfs_rtrmap_broot_space_calc(mp, be16_to_cpu(bb->bb_level), + be16_to_cpu(bb->bb_numrecs)); +} + +/* Compute the space required for the ondisk root block. */ +static inline size_t +xfs_rtrmap_droot_space_calc( + unsigned int level, + unsigned int nrecs) +{ + size_t sz = sizeof(struct xfs_rtrmap_root); + + if (level > 0) + return sz + nrecs * (2 * sizeof(struct xfs_rmap_key) + + sizeof(xfs_rtrmap_ptr_t)); + return sz + nrecs * sizeof(struct xfs_rmap_rec); +} + +/* + * Compute the space required for the ondisk root block given an incore root + * block. 
+ */ +static inline size_t +xfs_rtrmap_droot_space(struct xfs_btree_block *bb) +{ + return xfs_rtrmap_droot_space_calc(be16_to_cpu(bb->bb_level), + be16_to_cpu(bb->bb_numrecs)); +} + +int xfs_iformat_rtrmap(struct xfs_inode *ip, struct xfs_dinode *dip); +void xfs_rtrmapbt_to_disk(struct xfs_mount *mp, struct xfs_btree_block *rblock, + unsigned int rblocklen, struct xfs_rtrmap_root *dblock, + unsigned int dblocklen); +void xfs_iflush_rtrmap(struct xfs_inode *ip, struct xfs_dinode *dip); + +int xfs_rtrmapbt_create(struct xfs_rtgroup *rtg, struct xfs_inode *ip, + struct xfs_trans *tp, bool init); +int xfs_rtrmapbt_init_rtsb(struct xfs_mount *mp, struct xfs_rtgroup *rtg, + struct xfs_trans *tp); + +unsigned long long xfs_rtrmapbt_calc_size(struct xfs_mount *mp, + unsigned long long len); + +struct xfs_btree_cur *xfs_rtrmapbt_mem_cursor(struct xfs_rtgroup *rtg, + struct xfs_trans *tp, struct xfbtree *xfbtree); +int xfs_rtrmapbt_mem_init(struct xfs_mount *mp, struct xfbtree *xfbtree, + struct xfs_buftarg *btp, xfs_rgnumber_t rgno); + +#endif /* __XFS_RTRMAP_BTREE_H__ */ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 3b5623611eba..3dc5f5dba162 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -28,6 +28,8 @@ #include "xfs_rtbitmap.h" #include "xfs_exchrange.h" #include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" /* * Physical superblock buffer manipulations. Shared with libxfs in userspace. @@ -1215,11 +1217,23 @@ xfs_sb_mount_common( mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2; mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2; + mp->m_rtrmap_mxr[0] = xfs_rtrmapbt_maxrecs(mp, sbp->sb_blocksize, true); + mp->m_rtrmap_mxr[1] = xfs_rtrmapbt_maxrecs(mp, sbp->sb_blocksize, false); + mp->m_rtrmap_mnr[0] = mp->m_rtrmap_mxr[0] / 2; + mp->m_rtrmap_mnr[1] = mp->m_rtrmap_mxr[1] / 2; + mp->m_refc_mxr[0] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, true); mp->m_refc_mxr[1] = xfs_refcountbt_maxrecs(mp, sbp->sb_blocksize, false); mp->m_refc_mnr[0] = mp->m_refc_mxr[0] / 2; mp->m_refc_mnr[1] = mp->m_refc_mxr[1] / 2; + mp->m_rtrefc_mxr[0] = xfs_rtrefcountbt_maxrecs(mp, sbp->sb_blocksize, + true); + mp->m_rtrefc_mxr[1] = xfs_rtrefcountbt_maxrecs(mp, sbp->sb_blocksize, + false); + mp->m_rtrefc_mnr[0] = mp->m_rtrefc_mxr[0] / 2; + mp->m_rtrefc_mnr[1] = mp->m_rtrefc_mxr[1] / 2; + mp->m_bsize = XFS_FSB_TO_BB(mp, 1); mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp); diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index e7efdb9ceaf3..b1e0d9bc1f7d 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -42,6 +42,8 @@ extern const struct xfs_buf_ops xfs_rtbitmap_buf_ops; extern const struct xfs_buf_ops xfs_rtsummary_buf_ops; extern const struct xfs_buf_ops xfs_rtbuf_ops; extern const struct xfs_buf_ops xfs_rtsb_buf_ops; +extern const struct xfs_buf_ops xfs_rtrefcountbt_buf_ops; +extern const struct xfs_buf_ops xfs_rtrmapbt_buf_ops; extern const struct xfs_buf_ops xfs_sb_buf_ops; extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops; extern const struct xfs_buf_ops xfs_symlink_buf_ops; @@ -55,6 +57,9 @@ extern const struct xfs_btree_ops xfs_bmbt_ops; extern const struct xfs_btree_ops xfs_refcountbt_ops; extern const struct xfs_btree_ops xfs_rmapbt_ops; extern const struct xfs_btree_ops xfs_rmapbt_mem_ops; +extern const struct xfs_btree_ops xfs_rtrmapbt_ops; +extern const struct xfs_btree_ops xfs_rtrmapbt_mem_ops; +extern const struct xfs_btree_ops xfs_rtrefcountbt_ops; 
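Under the same size assumptions as earlier (illustrative only), the xfs_sb_mount_common() hunk above would compute m_rtrefc_mxr[] = { 335, 335 } for a 4 KiB-block filesystem and m_rtrefc_mnr[] = { 167, 167 }: the /2 encodes the usual btree rule that non-root blocks stay at least half full. The rtrmap limits are derived the same way, just from the larger rmap record and its two-key node entries.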
static inline bool xfs_btree_is_bno(const struct xfs_btree_ops *ops) { @@ -96,10 +101,26 @@ static inline bool xfs_btree_is_mem_rmap(const struct xfs_btree_ops *ops) { return ops == &xfs_rmapbt_mem_ops; } + +static inline bool xfs_btree_is_mem_rtrmap(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_rtrmapbt_mem_ops; +} #else # define xfs_btree_is_mem_rmap(...) (false) +# define xfs_btree_is_mem_rtrmap(...) (false) #endif +static inline bool xfs_btree_is_rtrmap(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_rtrmapbt_ops; +} + +static inline bool xfs_btree_is_rtrefcount(const struct xfs_btree_ops *ops) +{ + return ops == &xfs_rtrefcountbt_ops; +} + /* log size calculation functions */ int xfs_log_calc_unit_res(struct xfs_mount *mp, int unit_bytes); int xfs_log_calc_minimum_size(struct xfs_mount *); diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c index bab402340b5d..13d00c7166e1 100644 --- a/fs/xfs/libxfs/xfs_trans_resv.c +++ b/fs/xfs/libxfs/xfs_trans_resv.c @@ -92,6 +92,14 @@ xfs_refcountbt_block_count( return num_ops * (2 * mp->m_refc_maxlevels - 1); } +static unsigned int +xfs_rtrefcountbt_block_count( + struct xfs_mount *mp, + unsigned int num_ops) +{ + return num_ops * (2 * mp->m_rtrefc_maxlevels - 1); +} + /* * Logging inodes is really tricksy. They are logged in memory format, * which means that what we write into the log doesn't directly translate into @@ -213,7 +221,9 @@ xfs_calc_inode_chunk_res( * Per-extent log reservation for the btree changes involved in freeing or * allocating a realtime extent. We have to be able to log as many rtbitmap * blocks as needed to mark inuse XFS_BMBT_MAX_EXTLEN blocks' worth of realtime - * extents, as well as the realtime summary block. + * extents, as well as the realtime summary block (t1). Realtime rmap btree + * operations happen in a second transaction, so factor in a couple of rtrmapbt + * splits (t2). */ static unsigned int xfs_rtalloc_block_count( @@ -222,10 +232,16 @@ xfs_rtalloc_block_count( { unsigned int rtbmp_blocks; xfs_rtxlen_t rtxlen; + unsigned int t1, t2 = 0; rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN); rtbmp_blocks = xfs_rtbitmap_blockcount_len(mp, rtxlen); - return (rtbmp_blocks + 1) * num_ops; + t1 = (rtbmp_blocks + 1) * num_ops; + + if (xfs_has_rmapbt(mp)) + t2 = num_ops * (2 * mp->m_rtrmap_maxlevels - 1); + + return max(t1, t2); } /* @@ -251,10 +267,13 @@ xfs_rtalloc_block_count( * Compute the log reservation required to handle the refcount update * transaction. Refcount updates are always done via deferred log items. 
* - * This is calculated as: + * This is calculated as the max of: * Data device refcount updates (t1): * the agfs of the ags containing the blocks: nr_ops * sector size * the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size + * Realtime refcount updates (t2); + * the rt refcount inode + * the rtrefcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size */ static unsigned int xfs_calc_refcountbt_reservation( @@ -262,12 +281,20 @@ xfs_calc_refcountbt_reservation( unsigned int nr_ops) { unsigned int blksz = XFS_FSB_TO_B(mp, 1); + unsigned int t1, t2 = 0; if (!xfs_has_reflink(mp)) return 0; - return xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) + - xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz); + t1 = xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) + + xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz); + + if (xfs_has_realtime(mp)) + t2 = xfs_calc_inode_res(mp, 1) + + xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops), + blksz); + + return max(t1, t2); } /* diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index 1155ff2d37e2..d89b570aafcc 100644 --- a/fs/xfs/libxfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h @@ -14,6 +14,19 @@ #define XFS_MAX_CONTIG_BMAPS_PER_BLOCK(mp) \ (((mp)->m_bmap_dmxr[0]) - ((mp)->m_bmap_dmnr[0])) +/* Worst case number of realtime rmaps that can be held in a block. */ +#define XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) \ + (((mp)->m_rtrmap_mxr[0]) - ((mp)->m_rtrmap_mnr[0])) + +/* Adding one realtime rmap could split every level to the top of the tree. */ +#define XFS_RTRMAPADD_SPACE_RES(mp) ((mp)->m_rtrmap_maxlevels) + +/* Blocks we might need to add "b" realtime rmaps to a tree. */ +#define XFS_NRTRMAPADD_SPACE_RES(mp, b) \ + ((((b) + XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp) - 1) / \ + XFS_MAX_CONTIG_RTRMAPS_PER_BLOCK(mp)) * \ + XFS_RTRMAPADD_SPACE_RES(mp)) + /* Worst case number of rmaps that can be held in a block. */ #define XFS_MAX_CONTIG_RMAPS_PER_BLOCK(mp) \ (((mp)->m_rmap_mxr[0]) - ((mp)->m_rmap_mnr[0])) diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h index bf33c2b1e43e..ca2401c1facd 100644 --- a/fs/xfs/libxfs/xfs_types.h +++ b/fs/xfs/libxfs/xfs_types.h @@ -202,6 +202,13 @@ enum xfs_ag_resv_type { * altering fdblocks. If you think you need this you're wrong. */ XFS_AG_RESV_IGNORE, + + /* + * This allocation activity is being done on behalf of a metadata file. + * These files maintain their own permanent space reservations and are + * required to adjust fdblocks using the xfs_metafile_resv_* helpers. + */ + XFS_AG_RESV_METAFILE, }; /* Results of scanning a btree keyspace to check occupancy. */ diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c index b45d2b32051a..cd6f0223879f 100644 --- a/fs/xfs/scrub/agheader_repair.c +++ b/fs/xfs/scrub/agheader_repair.c @@ -647,7 +647,7 @@ xrep_agfl_fill( xfs_agblock_t agbno = start; int error; - trace_xrep_agfl_insert(sc->sa.pag, agbno, len); + trace_xrep_agfl_insert(pag_group(sc->sa.pag), agbno, len); while (agbno < start + len && af->fl_off < af->flcount) af->agfl_bno[af->fl_off++] = cpu_to_be32(agbno++); diff --git a/fs/xfs/scrub/alloc_repair.c b/fs/xfs/scrub/alloc_repair.c index 0433363a90b6..bed6a09aa791 100644 --- a/fs/xfs/scrub/alloc_repair.c +++ b/fs/xfs/scrub/alloc_repair.c @@ -542,8 +542,9 @@ xrep_abt_dispose_one( /* Add a deferred rmap for each extent we used. 
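To illustrate the XFS_NRTRMAPADD_SPACE_RES() macro added to xfs_trans_space.h above, here is a standalone sketch with entirely made-up geometry (not part of the patch):

#include <stdio.h>

int main(void)
{
        /* hypothetical geometry: leaf mxr 167 / mnr 83, 5-level rtrmap btree */
        unsigned int mxr0 = 167, mnr0 = 83, maxlevels = 5;
        unsigned int per_block = mxr0 - mnr0;   /* worst-case new rmaps one block absorbs */
        unsigned int nr_rmaps = 200;            /* rmaps being added */
        unsigned int blocks;

        /* each batch of per_block rmaps may split every level once */
        blocks = ((nr_rmaps + per_block - 1) / per_block) * maxlevels;
        printf("reserve %u blocks for %u rmap additions\n", blocks, nr_rmaps);
        return 0;
}

With those figures the program reserves 15 blocks: ceil(200 / 84) worst-case batches, each of which may split all 5 levels. The reservation changes earlier in the hunk follow the same worst-case style, taking the max of the data-device and realtime legs rather than their sum.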
*/ if (resv->used > 0) - xfs_rmap_alloc_extent(sc->tp, pag_agno(pag), resv->agbno, - resv->used, XFS_RMAP_OWN_AG); + xfs_rmap_alloc_extent(sc->tp, false, + xfs_agbno_to_fsb(pag, resv->agbno), resv->used, + XFS_RMAP_OWN_AG); /* * For each reserved btree block we didn't use, add it to the free diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index 7e00312225ed..66da7d4d56ba 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -21,6 +21,8 @@ #include "xfs_rmap_btree.h" #include "xfs_rtgroup.h" #include "xfs_health.h" +#include "xfs_rtalloc.h" +#include "xfs_rtrmap_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/btree.h" @@ -143,15 +145,22 @@ static inline bool xchk_bmap_get_rmap( struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec, - xfs_agblock_t agbno, + xfs_agblock_t bno, uint64_t owner, struct xfs_rmap_irec *rmap) { + struct xfs_btree_cur **curp = &info->sc->sa.rmap_cur; xfs_fileoff_t offset; unsigned int rflags = 0; int has_rmap; int error; + if (xfs_ifork_is_realtime(info->sc->ip, info->whichfork)) + curp = &info->sc->sr.rmap_cur; + + if (*curp == NULL) + return false; + if (info->whichfork == XFS_ATTR_FORK) rflags |= XFS_RMAP_ATTR_FORK; if (irec->br_state == XFS_EXT_UNWRITTEN) @@ -172,13 +181,13 @@ xchk_bmap_get_rmap( * range rmap lookup to make sure we get the correct owner/offset. */ if (info->is_shared) { - error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno, - owner, offset, rflags, rmap, &has_rmap); + error = xfs_rmap_lookup_le_range(*curp, bno, owner, offset, + rflags, rmap, &has_rmap); } else { - error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, - owner, offset, rflags, rmap, &has_rmap); + error = xfs_rmap_lookup_le(*curp, bno, owner, offset, + rflags, rmap, &has_rmap); } - if (!xchk_should_check_xref(info->sc, &error, &info->sc->sa.rmap_cur)) + if (!xchk_should_check_xref(info->sc, &error, curp)) return false; if (!has_rmap) @@ -192,29 +201,29 @@ STATIC void xchk_bmap_xref_rmap( struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec, - xfs_agblock_t agbno) + xfs_agblock_t bno) { struct xfs_rmap_irec rmap; unsigned long long rmap_end; uint64_t owner = info->sc->ip->i_ino; - if (!info->sc->sa.rmap_cur || xchk_skip_xref(info->sc->sm)) + if (xchk_skip_xref(info->sc->sm)) return; /* Find the rmap record for this irec. */ - if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap)) + if (!xchk_bmap_get_rmap(info, irec, bno, owner, &rmap)) return; /* * The rmap must be an exact match for this incore file mapping record, * which may have arisen from multiple ondisk records. */ - if (rmap.rm_startblock != agbno) + if (rmap.rm_startblock != bno) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount; - if (rmap_end != agbno + irec->br_blockcount) + if (rmap_end != bno + irec->br_blockcount) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); @@ -259,7 +268,7 @@ STATIC void xchk_bmap_xref_rmap_cow( struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec, - xfs_agblock_t agbno) + xfs_agblock_t bno) { struct xfs_rmap_irec rmap; unsigned long long rmap_end; @@ -269,7 +278,7 @@ xchk_bmap_xref_rmap_cow( return; /* Find the rmap record for this irec. */ - if (!xchk_bmap_get_rmap(info, irec, agbno, owner, &rmap)) + if (!xchk_bmap_get_rmap(info, irec, bno, owner, &rmap)) return; /* @@ -277,12 +286,12 @@ xchk_bmap_xref_rmap_cow( * can start before and end after the physical space allocated to this * mapping. 
There are no offsets to check. */ - if (rmap.rm_startblock > agbno) + if (rmap.rm_startblock > bno) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount; - if (rmap_end < agbno + irec->br_blockcount) + if (rmap_end < bno + irec->br_blockcount) xchk_fblock_xref_set_corrupt(info->sc, info->whichfork, irec->br_startoff); @@ -315,6 +324,8 @@ xchk_bmap_rt_iextent_xref( struct xchk_bmap_info *info, struct xfs_bmbt_irec *irec) { + struct xfs_owner_info oinfo; + xfs_rgblock_t rgbno; int error; error = xchk_rtgroup_init_existing(info->sc, @@ -324,10 +335,46 @@ xchk_bmap_rt_iextent_xref( irec->br_startoff, &error)) return; - xchk_rtgroup_lock(&info->sc->sr, XCHK_RTGLOCK_ALL); + error = xchk_rtgroup_lock(info->sc, &info->sc->sr, XCHK_RTGLOCK_ALL); + if (!xchk_fblock_process_error(info->sc, info->whichfork, + irec->br_startoff, &error)) + goto out_free; + xchk_xref_is_used_rt_space(info->sc, irec->br_startblock, irec->br_blockcount); + if (!xfs_has_rtrmapbt(info->sc->mp)) + goto out_cur; + + rgbno = xfs_rtb_to_rgbno(info->sc->mp, irec->br_startblock); + + switch (info->whichfork) { + case XFS_DATA_FORK: + xchk_bmap_xref_rmap(info, irec, rgbno); + if (!xfs_is_reflink_inode(info->sc->ip)) { + xfs_rmap_ino_owner(&oinfo, info->sc->ip->i_ino, + info->whichfork, irec->br_startoff); + xchk_xref_is_only_rt_owned_by(info->sc, rgbno, + irec->br_blockcount, &oinfo); + xchk_xref_is_not_rt_shared(info->sc, rgbno, + irec->br_blockcount); + } + xchk_xref_is_not_rt_cow_staging(info->sc, rgbno, + irec->br_blockcount); + break; + case XFS_COW_FORK: + xchk_bmap_xref_rmap_cow(info, irec, rgbno); + xchk_xref_is_only_rt_owned_by(info->sc, rgbno, + irec->br_blockcount, &XFS_RMAP_OINFO_COW); + xchk_xref_is_rt_cow_staging(info->sc, rgbno, + irec->br_blockcount); + xchk_xref_is_not_rt_shared(info->sc, rgbno, + irec->br_blockcount); + break; + } +out_cur: + xchk_rtgroup_btcur_free(&info->sc->sr); +out_free: xchk_rtgroup_free(info->sc, &info->sc->sr); } @@ -614,8 +661,7 @@ xchk_bmap_check_rmap( xchk_fblock_set_corrupt(sc, sbcri->whichfork, check_rec.rm_offset); if (irec.br_startblock != - xfs_agbno_to_fsb(to_perag(cur->bc_group), - check_rec.rm_startblock)) + xfs_gbno_to_fsb(cur->bc_group, check_rec.rm_startblock)) xchk_fblock_set_corrupt(sc, sbcri->whichfork, check_rec.rm_offset); if (irec.br_blockcount > check_rec.rm_blockcount) @@ -669,6 +715,30 @@ xchk_bmap_check_ag_rmaps( return error; } +/* Make sure each rt rmap has a corresponding bmbt entry. */ +STATIC int +xchk_bmap_check_rt_rmaps( + struct xfs_scrub *sc, + struct xfs_rtgroup *rtg) +{ + struct xchk_bmap_check_rmap_info sbcri; + struct xfs_btree_cur *cur; + int error; + + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); + cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg); + + sbcri.sc = sc; + sbcri.whichfork = XFS_DATA_FORK; + error = xfs_rmap_query_all(cur, xchk_bmap_check_rmap, &sbcri); + if (error == -ECANCELED) + error = 0; + + xfs_btree_del_cursor(cur, error); + xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); + return error; +} + /* * Decide if we want to scan the reverse mappings to determine if the attr * fork /really/ has zero space mappings. @@ -723,10 +793,6 @@ xchk_bmap_check_empty_datafork( { struct xfs_ifork *ifp = &ip->i_df; - /* Don't support realtime rmap checks yet. 
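In plain terms, the realtime cross-referencing added to xchk_bmap_rt_iextent_xref() above asserts that every realtime data-fork mapping has an exactly matching record in the rtgroup rmap btree. A minimal standalone sketch of that invariant, using plain integers instead of the kernel types (illustrative only, and simplified: the kernel also checks flags and does the offset matching via the rmap lookup):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t fileoff, rgbno, len; };       /* from the bmap btree */
struct rmap    { uint64_t startblock, blockcount, owner, offset; };

/* data fork case: the rmap must match the file mapping exactly */
static bool rmap_matches(const struct mapping *m, const struct rmap *r,
                         uint64_t ino)
{
        return r->owner == ino &&
               r->startblock == m->rgbno &&
               r->blockcount == m->len &&
               r->offset == m->fileoff;
}

int main(void)
{
        struct mapping m = { .fileoff = 50, .rgbno = 1000, .len = 8 };
        struct rmap    r = { .startblock = 1000, .blockcount = 8,
                             .owner = 42, .offset = 50 };

        printf("%s\n", rmap_matches(&m, &r, 42) ? "consistent" : "corrupt");
        return 0;
}

CoW fork mappings are the looser case: as xchk_bmap_xref_rmap_cow() shows, the covering XFS_RMAP_OWN_COW rmap only has to contain the mapping, since one staging record can span more space than a single mapping.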
*/ - if (XFS_IS_REALTIME_INODE(ip)) - return false; - /* * If the dinode repair found a bad data fork, it will reset the fork * to extents format with zero records and wait for the this scrubber @@ -777,6 +843,21 @@ xchk_bmap_check_rmaps( struct xfs_perag *pag = NULL; int error; + if (xfs_ifork_is_realtime(sc->ip, whichfork)) { + struct xfs_rtgroup *rtg = NULL; + + while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) { + error = xchk_bmap_check_rt_rmaps(sc, rtg); + if (error || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) { + xfs_rtgroup_rele(rtg); + return error; + } + } + + return 0; + } + while ((pag = xfs_perag_next(sc->mp, pag))) { error = xchk_bmap_check_ag_rmaps(sc, whichfork, pag); if (error || @@ -983,6 +1064,7 @@ xchk_bmap( case XFS_DINODE_FMT_UUID: case XFS_DINODE_FMT_DEV: case XFS_DINODE_FMT_LOCAL: + case XFS_DINODE_FMT_META_BTREE: /* No mappings to check. */ if (whichfork == XFS_COW_FORK) xchk_fblock_set_corrupt(sc, whichfork, 0); diff --git a/fs/xfs/scrub/bmap_repair.c b/fs/xfs/scrub/bmap_repair.c index 7c4955482641..1084213b8e9b 100644 --- a/fs/xfs/scrub/bmap_repair.c +++ b/fs/xfs/scrub/bmap_repair.c @@ -25,11 +25,13 @@ #include "xfs_bmap_btree.h" #include "xfs_rmap.h" #include "xfs_rmap_btree.h" +#include "xfs_rtrmap_btree.h" #include "xfs_refcount.h" #include "xfs_quota.h" #include "xfs_ialloc.h" #include "xfs_ag.h" #include "xfs_reflink.h" +#include "xfs_rtgroup.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" @@ -99,14 +101,21 @@ xrep_bmap_discover_shared( xfs_filblks_t blockcount) { struct xfs_scrub *sc = rb->sc; + struct xfs_btree_cur *cur; xfs_agblock_t agbno; xfs_agblock_t fbno; xfs_extlen_t flen; int error; - agbno = XFS_FSB_TO_AGBNO(sc->mp, startblock); - error = xfs_refcount_find_shared(sc->sa.refc_cur, agbno, blockcount, - &fbno, &flen, false); + if (XFS_IS_REALTIME_INODE(sc->ip)) { + agbno = xfs_rtb_to_rgbno(sc->mp, startblock); + cur = sc->sr.refc_cur; + } else { + agbno = XFS_FSB_TO_AGBNO(sc->mp, startblock); + cur = sc->sa.refc_cur; + } + error = xfs_refcount_find_shared(cur, agbno, blockcount, &fbno, &flen, + false); if (error) return error; @@ -359,6 +368,114 @@ xrep_bmap_scan_ag( return error; } +#ifdef CONFIG_XFS_RT +/* Check for any obvious errors or conflicts in the file mapping. */ +STATIC int +xrep_bmap_check_rtfork_rmap( + struct xfs_scrub *sc, + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec) +{ + /* xattr extents are never stored on realtime devices */ + if (rec->rm_flags & XFS_RMAP_ATTR_FORK) + return -EFSCORRUPTED; + + /* bmbt blocks are never stored on realtime devices */ + if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK) + return -EFSCORRUPTED; + + /* Data extents for non-rt files are never stored on the rt device. */ + if (!XFS_IS_REALTIME_INODE(sc->ip)) + return -EFSCORRUPTED; + + /* Check the file offsets and physical extents. */ + if (!xfs_verify_fileext(sc->mp, rec->rm_offset, rec->rm_blockcount)) + return -EFSCORRUPTED; + + /* Check that this is within the rtgroup. */ + if (!xfs_verify_rgbext(to_rtg(cur->bc_group), rec->rm_startblock, + rec->rm_blockcount)) + return -EFSCORRUPTED; + + /* Make sure this isn't free space. */ + return xrep_require_rtext_inuse(sc, rec->rm_startblock, + rec->rm_blockcount); +} + +/* Record realtime extents that belong to this inode's fork. 
*/ +STATIC int +xrep_bmap_walk_rtrmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_bmap *rb = priv; + int error = 0; + + if (xchk_should_terminate(rb->sc, &error)) + return error; + + /* Skip extents which are not owned by this inode and fork. */ + if (rec->rm_owner != rb->sc->ip->i_ino) + return 0; + + error = xrep_bmap_check_rtfork_rmap(rb->sc, cur, rec); + if (error) + return error; + + /* + * Record all blocks allocated to this file even if the extent isn't + * for the fork we're rebuilding so that we can reset di_nblocks later. + */ + rb->nblocks += rec->rm_blockcount; + + /* If this rmap isn't for the fork we want, we're done. */ + if (rb->whichfork == XFS_DATA_FORK && + (rec->rm_flags & XFS_RMAP_ATTR_FORK)) + return 0; + if (rb->whichfork == XFS_ATTR_FORK && + !(rec->rm_flags & XFS_RMAP_ATTR_FORK)) + return 0; + + return xrep_bmap_from_rmap(rb, rec->rm_offset, + xfs_rgbno_to_rtb(to_rtg(cur->bc_group), + rec->rm_startblock), + rec->rm_blockcount, + rec->rm_flags & XFS_RMAP_UNWRITTEN); +} + +/* Scan the realtime reverse mappings to build the new extent map. */ +STATIC int +xrep_bmap_scan_rtgroup( + struct xrep_bmap *rb, + struct xfs_rtgroup *rtg) +{ + struct xfs_scrub *sc = rb->sc; + int error; + + if (!xfs_has_rtrmapbt(sc->mp)) + return 0; + + error = xrep_rtgroup_init(sc, rtg, &sc->sr, + XFS_RTGLOCK_RMAP | + XFS_RTGLOCK_REFCOUNT | + XFS_RTGLOCK_BITMAP_SHARED); + if (error) + return error; + + error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_bmap_walk_rtrmap, rb); + xchk_rtgroup_btcur_free(&sc->sr); + xchk_rtgroup_free(sc, &sc->sr); + return error; +} +#else +static inline int +xrep_bmap_scan_rtgroup(struct xrep_bmap *rb, struct xfs_rtgroup *rtg) +{ + return -EFSCORRUPTED; +} +#endif + /* Find the delalloc extents from the old incore extent tree. */ STATIC int xrep_bmap_find_delalloc( @@ -410,6 +527,22 @@ xrep_bmap_find_mappings( struct xfs_perag *pag = NULL; int error = 0; + /* + * Iterate the rtrmaps for extents. Metadata files never have content + * on the realtime device, so there's no need to scan them. + */ + if (!xfs_is_metadir_inode(sc->ip)) { + struct xfs_rtgroup *rtg = NULL; + + while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) { + error = xrep_bmap_scan_rtgroup(rb, rtg); + if (error) { + xfs_rtgroup_rele(rtg); + return error; + } + } + } + /* Iterate the rmaps for extents. */ while ((pag = xfs_perag_next(sc->mp, pag))) { error = xrep_bmap_scan_ag(rb, pag); @@ -731,6 +864,7 @@ xrep_bmap_check_inputs( case XFS_DINODE_FMT_DEV: case XFS_DINODE_FMT_LOCAL: case XFS_DINODE_FMT_UUID: + case XFS_DINODE_FMT_META_BTREE: return -ECANCELED; case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: @@ -753,10 +887,6 @@ xrep_bmap_check_inputs( return -EINVAL; } - /* Don't know how to rebuild realtime data forks. 
*/ - if (XFS_IS_REALTIME_INODE(sc->ip)) - return -EOPNOTSUPP; - return 0; } @@ -782,10 +912,6 @@ xrep_bmap_init_reflink_scan( if (whichfork != XFS_DATA_FORK) return RLS_IRRELEVANT; - /* cannot share realtime extents */ - if (XFS_IS_REALTIME_INODE(sc->ip)) - return RLS_IRRELEVANT; - return RLS_UNKNOWN; } diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c index 5cbd94b56582..28ad341df8ee 100644 --- a/fs/xfs/scrub/common.c +++ b/fs/xfs/scrub/common.c @@ -35,6 +35,9 @@ #include "xfs_exchmaps.h" #include "xfs_rtbitmap.h" #include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_bmap_util.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -719,20 +722,111 @@ xchk_rtgroup_init( return 0; } -void +/* Lock all the rt group metadata inode ILOCKs and wait for intents. */ +int xchk_rtgroup_lock( + struct xfs_scrub *sc, struct xchk_rt *sr, unsigned int rtglock_flags) { - xfs_rtgroup_lock(sr->rtg, rtglock_flags); + int error = 0; + + ASSERT(sr->rtg != NULL); + + /* + * If we're /only/ locking the rtbitmap in shared mode, then we're + * obviously not trying to compare records in two metadata inodes. + * There's no need to drain intents here because the caller (most + * likely the rgsuper scanner) doesn't need that level of consistency. + */ + if (rtglock_flags == XFS_RTGLOCK_BITMAP_SHARED) { + xfs_rtgroup_lock(sr->rtg, rtglock_flags); + sr->rtlock_flags = rtglock_flags; + return 0; + } + + do { + if (xchk_should_terminate(sc, &error)) + return error; + + xfs_rtgroup_lock(sr->rtg, rtglock_flags); + + /* + * If we've grabbed a non-metadata file for scrubbing, we + * assume that holding its ILOCK will suffice to coordinate + * with any rt intent chains involving this inode. + */ + if (sc->ip && !xfs_is_internal_inode(sc->ip)) + break; + + /* + * Decide if the rt group is quiet enough for all metadata to + * be consistent with each other. Regular file IO doesn't get + * to lock all the rt inodes at the same time, which means that + * there could be other threads in the middle of processing a + * chain of deferred ops. + * + * We just locked all the metadata inodes for this rt group; + * now take a look to see if there are any intents in progress. + * If there are, drop the rt group inode locks and wait for the + * intents to drain. Since we hold the rt group inode locks + * for the duration of the scrub, this is the only time we have + * to sample the intents counter; any threads increasing it + * after this point can't possibly be in the middle of a chain + * of rt metadata updates. + * + * Obviously, this should be slanted against scrub and in favor + * of runtime threads. + */ + if (!xfs_group_intent_busy(rtg_group(sr->rtg))) + break; + + xfs_rtgroup_unlock(sr->rtg, rtglock_flags); + + if (!(sc->flags & XCHK_FSGATES_DRAIN)) + return -ECHRNG; + error = xfs_group_intent_drain(rtg_group(sr->rtg)); + if (error) { + if (error == -ERESTARTSYS) + error = -EINTR; + return error; + } + } while (1); + sr->rtlock_flags = rtglock_flags; + + if (xfs_has_rtrmapbt(sc->mp) && (rtglock_flags & XFS_RTGLOCK_RMAP)) + sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg); + + if (xfs_has_rtreflink(sc->mp) && (rtglock_flags & XFS_RTGLOCK_REFCOUNT)) + sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg); + + return 0; +} + +/* + * Free all the btree cursors and other incore data relating to the realtime + * group. This has to be done /before/ committing (or cancelling) the scrub + * transaction. 
+ */ +void +xchk_rtgroup_btcur_free( + struct xchk_rt *sr) +{ + if (sr->rmap_cur) + xfs_btree_del_cursor(sr->rmap_cur, XFS_BTREE_ERROR); + if (sr->refc_cur) + xfs_btree_del_cursor(sr->refc_cur, XFS_BTREE_ERROR); + + sr->refc_cur = NULL; + sr->rmap_cur = NULL; } /* * Unlock the realtime group. This must be done /after/ committing (or * cancelling) the scrub transaction. */ -static void +void xchk_rtgroup_unlock( struct xchk_rt *sr) { @@ -812,6 +906,14 @@ xchk_setup_fs( return xchk_trans_alloc(sc, resblks); } +/* Set us up with a transaction and an empty context to repair rt metadata. */ +int +xchk_setup_rt( + struct xfs_scrub *sc) +{ + return xchk_trans_alloc(sc, xrep_calc_rtgroup_resblks(sc)); +} + /* Set us up with AG headers and btree cursors. */ int xchk_setup_ag_btree( @@ -1379,7 +1481,7 @@ xchk_fsgates_enable( trace_xchk_fsgates_enable(sc, scrub_fsgates); if (scrub_fsgates & XCHK_FSGATES_DRAIN) - xfs_drain_wait_enable(); + xfs_defer_drain_wait_enable(); if (scrub_fsgates & XCHK_FSGATES_QUOTA) xfs_dqtrx_hook_enable(); @@ -1573,3 +1675,63 @@ xchk_inode_rootdir_inum(const struct xfs_inode *ip) return mp->m_metadirip->i_ino; return mp->m_rootip->i_ino; } + +static int +xchk_meta_btree_count_blocks( + struct xfs_scrub *sc, + xfs_extnum_t *nextents, + xfs_filblks_t *count) +{ + struct xfs_btree_cur *cur; + int error; + + if (!sc->sr.rtg) { + ASSERT(0); + return -EFSCORRUPTED; + } + + switch (sc->ip->i_metatype) { + case XFS_METAFILE_RTRMAP: + cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg); + break; + case XFS_METAFILE_RTREFCOUNT: + cur = xfs_rtrefcountbt_init_cursor(sc->tp, sc->sr.rtg); + break; + default: + ASSERT(0); + return -EFSCORRUPTED; + } + + error = xfs_btree_count_blocks(cur, count); + xfs_btree_del_cursor(cur, error); + if (!error) { + *nextents = 0; + (*count)--; /* don't count the btree iroot */ + } + return error; +} + +/* Count the blocks used by a file, even if it's a metadata inode. 
*/ +int +xchk_inode_count_blocks( + struct xfs_scrub *sc, + int whichfork, + xfs_extnum_t *nextents, + xfs_filblks_t *count) +{ + struct xfs_ifork *ifp = xfs_ifork_ptr(sc->ip, whichfork); + + if (!ifp) { + *nextents = 0; + *count = 0; + return 0; + } + + if (ifp->if_format == XFS_DINODE_FMT_META_BTREE) { + ASSERT(whichfork == XFS_DATA_FORK); + return xchk_meta_btree_count_blocks(sc, nextents, count); + } + + return xfs_bmap_count_blocks(sc->tp, sc->ip, whichfork, nextents, + count); +} diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h index 9ff3cafd8679..bdcd40f0ec74 100644 --- a/fs/xfs/scrub/common.h +++ b/fs/xfs/scrub/common.h @@ -63,6 +63,7 @@ static inline int xchk_setup_nothing(struct xfs_scrub *sc) /* Setup functions */ int xchk_setup_agheader(struct xfs_scrub *sc); int xchk_setup_fs(struct xfs_scrub *sc); +int xchk_setup_rt(struct xfs_scrub *sc); int xchk_setup_ag_allocbt(struct xfs_scrub *sc); int xchk_setup_ag_iallocbt(struct xfs_scrub *sc); int xchk_setup_ag_rmapbt(struct xfs_scrub *sc); @@ -80,10 +81,14 @@ int xchk_setup_metapath(struct xfs_scrub *sc); int xchk_setup_rtbitmap(struct xfs_scrub *sc); int xchk_setup_rtsummary(struct xfs_scrub *sc); int xchk_setup_rgsuperblock(struct xfs_scrub *sc); +int xchk_setup_rtrmapbt(struct xfs_scrub *sc); +int xchk_setup_rtrefcountbt(struct xfs_scrub *sc); #else # define xchk_setup_rtbitmap xchk_setup_nothing # define xchk_setup_rtsummary xchk_setup_nothing # define xchk_setup_rgsuperblock xchk_setup_nothing +# define xchk_setup_rtrmapbt xchk_setup_nothing +# define xchk_setup_rtrefcountbt xchk_setup_nothing #endif #ifdef CONFIG_XFS_QUOTA int xchk_ino_dqattach(struct xfs_scrub *sc); @@ -125,7 +130,9 @@ xchk_ag_init_existing( #ifdef CONFIG_XFS_RT /* All the locks we need to check an rtgroup. */ -#define XCHK_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP) +#define XCHK_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP | \ + XFS_RTGLOCK_RMAP | \ + XFS_RTGLOCK_REFCOUNT) int xchk_rtgroup_init(struct xfs_scrub *sc, xfs_rgnumber_t rgno, struct xchk_rt *sr); @@ -141,12 +148,17 @@ xchk_rtgroup_init_existing( return error == -ENOENT ? -EFSCORRUPTED : error; } -void xchk_rtgroup_lock(struct xchk_rt *sr, unsigned int rtglock_flags); +int xchk_rtgroup_lock(struct xfs_scrub *sc, struct xchk_rt *sr, + unsigned int rtglock_flags); +void xchk_rtgroup_unlock(struct xchk_rt *sr); +void xchk_rtgroup_btcur_free(struct xchk_rt *sr); void xchk_rtgroup_free(struct xfs_scrub *sc, struct xchk_rt *sr); #else # define xchk_rtgroup_init(sc, rgno, sr) (-EFSCORRUPTED) # define xchk_rtgroup_init_existing(sc, rgno, sr) (-EFSCORRUPTED) -# define xchk_rtgroup_lock(sc, lockflags) do { } while (0) +# define xchk_rtgroup_lock(sc, sr, lockflags) (-EFSCORRUPTED) +# define xchk_rtgroup_unlock(sr) do { } while (0) +# define xchk_rtgroup_btcur_free(sr) do { } while (0) # define xchk_rtgroup_free(sc, sr) do { } while (0) #endif /* CONFIG_XFS_RT */ @@ -257,6 +269,12 @@ int xchk_metadata_inode_forks(struct xfs_scrub *sc); (sc)->mp->m_super->s_id, \ (sc)->ip ? (sc)->ip->i_ino : (sc)->sm->sm_ino, \ ##__VA_ARGS__) +#define xchk_xfile_rtgroup_descr(sc, fmt, ...) \ + kasprintf(XCHK_GFP_FLAGS, "XFS (%s): rtgroup 0x%x " fmt, \ + (sc)->mp->m_super->s_id, \ + (sc)->sa.pag ? 
\ + rtg_rgno((sc)->sr.rtg) : (sc)->sm->sm_agno, \ + ##__VA_ARGS__) /* * Setting up a hook to wait for intents to drain is costly -- we have to take @@ -274,6 +292,8 @@ void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks); int xchk_inode_is_allocated(struct xfs_scrub *sc, xfs_agino_t agino, bool *inuse); +int xchk_inode_count_blocks(struct xfs_scrub *sc, int whichfork, + xfs_extnum_t *nextents, xfs_filblks_t *count); bool xchk_inode_is_dirtree_root(const struct xfs_inode *ip); bool xchk_inode_is_sb_rooted(const struct xfs_inode *ip); diff --git a/fs/xfs/scrub/cow_repair.c b/fs/xfs/scrub/cow_repair.c index 5b6194cef3e5..38a246b8bf11 100644 --- a/fs/xfs/scrub/cow_repair.c +++ b/fs/xfs/scrub/cow_repair.c @@ -26,6 +26,9 @@ #include "xfs_errortag.h" #include "xfs_icache.h" #include "xfs_refcount_btree.h" +#include "xfs_rtalloc.h" +#include "xfs_rtbitmap.h" +#include "xfs_rtgroup.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" @@ -34,6 +37,7 @@ #include "scrub/bitmap.h" #include "scrub/off_bitmap.h" #include "scrub/fsb_bitmap.h" +#include "scrub/rtb_bitmap.h" #include "scrub/reap.h" /* @@ -61,7 +65,10 @@ struct xrep_cow { struct xoff_bitmap bad_fileoffs; /* Bitmap of fsblocks that were removed from the CoW fork. */ - struct xfsb_bitmap old_cowfork_fsblocks; + union { + struct xfsb_bitmap old_cowfork_fsblocks; + struct xrtb_bitmap old_cowfork_rtblocks; + }; /* CoW fork mappings used to scan for bad CoW staging extents. */ struct xfs_bmbt_irec irec; @@ -145,8 +152,7 @@ xrep_cow_mark_shared_staging( xrep_cow_trim_refcount(xc, &rrec, rec); return xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(to_perag(cur->bc_group), - rrec.rc_startblock), + xfs_gbno_to_fsb(cur->bc_group, rrec.rc_startblock), rrec.rc_blockcount); } @@ -177,9 +183,8 @@ xrep_cow_mark_missing_staging( if (xc->next_bno >= rrec.rc_startblock) goto next; - error = xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(to_perag(cur->bc_group), xc->next_bno), + xfs_gbno_to_fsb(cur->bc_group, xc->next_bno), rrec.rc_startblock - xc->next_bno); if (error) return error; @@ -222,8 +227,7 @@ xrep_cow_mark_missing_staging_rmap( } return xrep_cow_mark_file_range(xc, - xfs_agbno_to_fsb(to_perag(cur->bc_group), rec_bno), - rec_len); + xfs_gbno_to_fsb(cur->bc_group, rec_bno), rec_len); } /* @@ -311,6 +315,92 @@ out_pag: } /* + * Find any part of the CoW fork mapping that isn't a single-owner CoW staging + * extent and mark the corresponding part of the file range in the bitmap. + */ +STATIC int +xrep_cow_find_bad_rt( + struct xrep_cow *xc) +{ + struct xfs_refcount_irec rc_low = { 0 }; + struct xfs_refcount_irec rc_high = { 0 }; + struct xfs_rmap_irec rm_low = { 0 }; + struct xfs_rmap_irec rm_high = { 0 }; + struct xfs_scrub *sc = xc->sc; + struct xfs_rtgroup *rtg; + int error = 0; + + xc->irec_startbno = xfs_rtb_to_rgbno(sc->mp, xc->irec.br_startblock); + + rtg = xfs_rtgroup_get(sc->mp, + xfs_rtb_to_rgno(sc->mp, xc->irec.br_startblock)); + if (!rtg) + return -EFSCORRUPTED; + + error = xrep_rtgroup_init(sc, rtg, &sc->sr, + XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT); + if (error) + goto out_rtg; + + /* Mark any CoW fork extents that are shared. 
*/ + rc_low.rc_startblock = xc->irec_startbno; + rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1; + rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_SHARED; + error = xfs_refcount_query_range(sc->sr.refc_cur, &rc_low, &rc_high, + xrep_cow_mark_shared_staging, xc); + if (error) + goto out_sr; + + /* Make sure there are CoW staging extents for the whole mapping. */ + rc_low.rc_startblock = xc->irec_startbno; + rc_high.rc_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1; + rc_low.rc_domain = rc_high.rc_domain = XFS_REFC_DOMAIN_COW; + xc->next_bno = xc->irec_startbno; + error = xfs_refcount_query_range(sc->sr.refc_cur, &rc_low, &rc_high, + xrep_cow_mark_missing_staging, xc); + if (error) + goto out_sr; + + if (xc->next_bno < xc->irec_startbno + xc->irec.br_blockcount) { + error = xrep_cow_mark_file_range(xc, + xfs_rgbno_to_rtb(rtg, xc->next_bno), + xc->irec_startbno + xc->irec.br_blockcount - + xc->next_bno); + if (error) + goto out_sr; + } + + /* Mark any area has an rmap that isn't a COW staging extent. */ + rm_low.rm_startblock = xc->irec_startbno; + memset(&rm_high, 0xFF, sizeof(rm_high)); + rm_high.rm_startblock = xc->irec_startbno + xc->irec.br_blockcount - 1; + error = xfs_rmap_query_range(sc->sr.rmap_cur, &rm_low, &rm_high, + xrep_cow_mark_missing_staging_rmap, xc); + if (error) + goto out_sr; + + /* + * If userspace is forcing us to rebuild the CoW fork or someone + * turned on the debugging knob, replace everything in the + * CoW fork and then scan for staging extents in the refcountbt. + */ + if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_FORCE_REBUILD) || + XFS_TEST_ERROR(false, sc->mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR)) { + error = xrep_cow_mark_file_range(xc, xc->irec.br_startblock, + xc->irec.br_blockcount); + if (error) + goto out_rtg; + } + +out_sr: + xchk_rtgroup_btcur_free(&sc->sr); + xchk_rtgroup_free(sc, &sc->sr); +out_rtg: + xfs_rtgroup_put(rtg); + return error; +} + +/* * Allocate a replacement CoW staging extent of up to the given number of * blocks, and fill out the mapping. */ @@ -343,7 +433,7 @@ xrep_cow_alloc( if (args.fsbno == NULLFSBLOCK) return -ENOSPC; - xfs_refcount_alloc_cow_extent(sc->tp, args.fsbno, args.len); + xfs_refcount_alloc_cow_extent(sc->tp, false, args.fsbno, args.len); repl->fsbno = args.fsbno; repl->len = args.len; @@ -351,6 +441,32 @@ xrep_cow_alloc( } /* + * Allocate a replacement rt CoW staging extent of up to the given number of + * blocks, and fill out the mapping. + */ +STATIC int +xrep_cow_alloc_rt( + struct xfs_scrub *sc, + xfs_extlen_t maxlen, + struct xrep_cow_extent *repl) +{ + xfs_rtxlen_t maxrtx = xfs_rtb_to_rtx(sc->mp, maxlen); + int error; + + error = xfs_trans_reserve_more(sc->tp, 0, maxrtx); + if (error) + return error; + + error = xfs_rtallocate_rtgs(sc->tp, NULLRTBLOCK, 1, maxrtx, 1, false, + false, &repl->fsbno, &repl->len); + if (error) + return error; + + xfs_refcount_alloc_cow_extent(sc->tp, true, repl->fsbno, repl->len); + return 0; +} + +/* * Look up the current CoW fork mapping so that we only allocate enough to * replace a single mapping. 
If we don't find a mapping that covers the start * of the file range, or we find a delalloc or written extent, something is @@ -467,7 +583,10 @@ xrep_cow_replace_range( */ alloc_len = min_t(xfs_fileoff_t, XFS_MAX_BMBT_EXTLEN, nextoff - startoff); - error = xrep_cow_alloc(sc, alloc_len, &repl); + if (XFS_IS_REALTIME_INODE(sc->ip)) + error = xrep_cow_alloc_rt(sc, alloc_len, &repl); + else + error = xrep_cow_alloc(sc, alloc_len, &repl); if (error) return error; @@ -483,8 +602,12 @@ xrep_cow_replace_range( return error; /* Note the old CoW staging extents; we'll reap them all later. */ - error = xfsb_bitmap_set(&xc->old_cowfork_fsblocks, got.br_startblock, - repl.len); + if (XFS_IS_REALTIME_INODE(sc->ip)) + error = xrtb_bitmap_set(&xc->old_cowfork_rtblocks, + got.br_startblock, repl.len); + else + error = xfsb_bitmap_set(&xc->old_cowfork_fsblocks, + got.br_startblock, repl.len); if (error) return error; @@ -540,8 +663,16 @@ xrep_bmap_cow( if (!ifp) return 0; - /* realtime files aren't supported yet */ - if (XFS_IS_REALTIME_INODE(sc->ip)) + /* + * Realtime files with large extent sizes are not supported because + * we could encounter an CoW mapping that has been partially written + * out *and* requires replacement, and there's no solution to that. + */ + if (xfs_inode_has_bigrtalloc(sc->ip)) + return -EOPNOTSUPP; + + /* Metadata inodes aren't supposed to have data on the rt volume. */ + if (xfs_is_metadir_inode(sc->ip) && XFS_IS_REALTIME_INODE(sc->ip)) return -EOPNOTSUPP; /* @@ -562,7 +693,10 @@ xrep_bmap_cow( xc->sc = sc; xoff_bitmap_init(&xc->bad_fileoffs); - xfsb_bitmap_init(&xc->old_cowfork_fsblocks); + if (XFS_IS_REALTIME_INODE(sc->ip)) + xrtb_bitmap_init(&xc->old_cowfork_rtblocks); + else + xfsb_bitmap_init(&xc->old_cowfork_fsblocks); for_each_xfs_iext(ifp, &icur, &xc->irec) { if (xchk_should_terminate(sc, &error)) @@ -585,7 +719,10 @@ xrep_bmap_cow( if (xfs_bmap_is_written_extent(&xc->irec)) continue; - error = xrep_cow_find_bad(xc); + if (XFS_IS_REALTIME_INODE(sc->ip)) + error = xrep_cow_find_bad_rt(xc); + else + error = xrep_cow_find_bad(xc); if (error) goto out_bitmap; } @@ -600,13 +737,20 @@ xrep_bmap_cow( * by the refcount btree, not the inode, so it is correct to treat them * like inode metadata. */ - error = xrep_reap_fsblocks(sc, &xc->old_cowfork_fsblocks, - &XFS_RMAP_OINFO_COW); + if (XFS_IS_REALTIME_INODE(sc->ip)) + error = xrep_reap_rtblocks(sc, &xc->old_cowfork_rtblocks, + &XFS_RMAP_OINFO_COW); + else + error = xrep_reap_fsblocks(sc, &xc->old_cowfork_fsblocks, + &XFS_RMAP_OINFO_COW); if (error) goto out_bitmap; out_bitmap: - xfsb_bitmap_destroy(&xc->old_cowfork_fsblocks); + if (XFS_IS_REALTIME_INODE(sc->ip)) + xrtb_bitmap_destroy(&xc->old_cowfork_rtblocks); + else + xfsb_bitmap_destroy(&xc->old_cowfork_fsblocks); xoff_bitmap_destroy(&xc->bad_fileoffs); kfree(xc); return error; diff --git a/fs/xfs/scrub/health.c b/fs/xfs/scrub/health.c index ccc6ca5934ca..3c0f25098b69 100644 --- a/fs/xfs/scrub/health.c +++ b/fs/xfs/scrub/health.c @@ -114,6 +114,8 @@ static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = { [XFS_SCRUB_TYPE_DIRTREE] = { XHG_INO, XFS_SICK_INO_DIRTREE }, [XFS_SCRUB_TYPE_METAPATH] = { XHG_FS, XFS_SICK_FS_METAPATH }, [XFS_SCRUB_TYPE_RGSUPER] = { XHG_RTGROUP, XFS_SICK_RG_SUPER }, + [XFS_SCRUB_TYPE_RTRMAPBT] = { XHG_RTGROUP, XFS_SICK_RG_RMAPBT }, + [XFS_SCRUB_TYPE_RTREFCBT] = { XHG_RTGROUP, XFS_SICK_RG_REFCNTBT }, }; /* Return the health status mask for this scrub type. 
*/ diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 25ee66e7649d..db6edd5a5fe5 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -260,12 +260,7 @@ xchk_inode_extsize( xchk_ino_set_warning(sc, ino); } -/* - * Validate di_cowextsize hint. - * - * The rules are documented at xfs_ioctl_setattr_check_cowextsize(). - * These functions must be kept in sync with each other. - */ +/* Validate di_cowextsize hint. */ STATIC void xchk_inode_cowextsize( struct xfs_scrub *sc, @@ -276,12 +271,25 @@ xchk_inode_cowextsize( uint64_t flags2) { xfs_failaddr_t fa; + uint32_t value = be32_to_cpu(dip->di_cowextsize); - fa = xfs_inode_validate_cowextsize(sc->mp, - be32_to_cpu(dip->di_cowextsize), mode, flags, - flags2); + fa = xfs_inode_validate_cowextsize(sc->mp, value, mode, flags, flags2); if (fa) xchk_ino_set_corrupt(sc, ino); + + /* + * XFS allows a sysadmin to change the rt extent size when adding a rt + * section to a filesystem after formatting. If there are any + * directories with cowextsize and rtinherit set, the hint could become + * misaligned with the new rextsize. The verifier doesn't check this, + * because we allow rtinherit directories even without an rt device. + * Flag this as an administrative warning since we will clean this up + * eventually. + */ + if ((flags & XFS_DIFLAG_RTINHERIT) && + (flags2 & XFS_DIFLAG2_COWEXTSIZE) && + value % sc->mp->m_sb.sb_rextsize > 0) + xchk_ino_set_warning(sc, ino); } /* Make sure the di_flags make sense for the inode. */ @@ -360,8 +368,9 @@ xchk_inode_flags2( if ((flags2 & XFS_DIFLAG2_REFLINK) && !S_ISREG(mode)) goto bad; - /* realtime and reflink make no sense, currently */ - if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK)) + /* realtime and reflink don't always go together */ + if ((flags & XFS_DIFLAG_REALTIME) && (flags2 & XFS_DIFLAG2_REFLINK) && + !xfs_has_rtreflink(mp)) goto bad; /* no bigtime iflag without the bigtime feature */ @@ -502,6 +511,10 @@ xchk_dinode( if (!S_ISREG(mode) && !S_ISDIR(mode)) xchk_ino_set_corrupt(sc, ino); break; + case XFS_DINODE_FMT_META_BTREE: + if (!S_ISREG(mode)) + xchk_ino_set_corrupt(sc, ino); + break; case XFS_DINODE_FMT_UUID: default: xchk_ino_set_corrupt(sc, ino); @@ -686,15 +699,13 @@ xchk_inode_xref_bmap( return; /* Walk all the extents to check nextents/naextents/nblocks. 
*/ - error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK, - &nextents, &count); + error = xchk_inode_count_blocks(sc, XFS_DATA_FORK, &nextents, &count); if (!xchk_should_check_xref(sc, &error, NULL)) return; if (nextents < xfs_dfork_data_extents(dip)) xchk_ino_xref_set_corrupt(sc, sc->ip->i_ino); - error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK, - &nextents, &acount); + error = xchk_inode_count_blocks(sc, XFS_ATTR_FORK, &nextents, &acount); if (!xchk_should_check_xref(sc, &error, NULL)) return; if (nextents != xfs_dfork_attr_extents(dip)) diff --git a/fs/xfs/scrub/inode_repair.c b/fs/xfs/scrub/inode_repair.c index 5a58ddd27bd2..2f641b6d663e 100644 --- a/fs/xfs/scrub/inode_repair.c +++ b/fs/xfs/scrub/inode_repair.c @@ -38,6 +38,9 @@ #include "xfs_log_priv.h" #include "xfs_health.h" #include "xfs_symlink_remote.h" +#include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" @@ -562,8 +565,6 @@ xrep_dinode_flags( flags2 |= XFS_DIFLAG2_REFLINK; else flags2 &= ~(XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE); - if (flags & XFS_DIFLAG_REALTIME) - flags2 &= ~XFS_DIFLAG2_REFLINK; if (!xfs_has_bigtime(mp)) flags2 &= ~XFS_DIFLAG2_BIGTIME; if (!xfs_has_large_extent_counts(mp)) @@ -773,17 +774,71 @@ xrep_dinode_count_ag_rmaps( return error; } +/* Count extents and blocks for an inode given an rt rmap. */ +STATIC int +xrep_dinode_walk_rtrmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_inode *ri = priv; + int error = 0; + + if (xchk_should_terminate(ri->sc, &error)) + return error; + + /* We only care about this inode. */ + if (rec->rm_owner != ri->sc->sm->sm_ino) + return 0; + + if (rec->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK)) + return -EFSCORRUPTED; + + ri->rt_blocks += rec->rm_blockcount; + ri->rt_extents++; + return 0; +} + +/* Count extents and blocks for an inode from all realtime rmap data. */ +STATIC int +xrep_dinode_count_rtgroup_rmaps( + struct xrep_inode *ri, + struct xfs_rtgroup *rtg) +{ + struct xfs_scrub *sc = ri->sc; + int error; + + error = xrep_rtgroup_init(sc, rtg, &sc->sr, XFS_RTGLOCK_RMAP); + if (error) + return error; + + error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_dinode_walk_rtrmap, + ri); + xchk_rtgroup_btcur_free(&sc->sr); + xchk_rtgroup_free(sc, &sc->sr); + return error; +} + /* Count extents and blocks for a given inode from all rmap data. */ STATIC int xrep_dinode_count_rmaps( struct xrep_inode *ri) { struct xfs_perag *pag = NULL; + struct xfs_rtgroup *rtg = NULL; int error; - if (!xfs_has_rmapbt(ri->sc->mp) || xfs_has_realtime(ri->sc->mp)) + if (!xfs_has_rmapbt(ri->sc->mp)) return -EOPNOTSUPP; + while ((rtg = xfs_rtgroup_next(ri->sc->mp, rtg))) { + error = xrep_dinode_count_rtgroup_rmaps(ri, rtg); + if (error) { + xfs_rtgroup_rele(rtg); + return error; + } + } + while ((pag = xfs_perag_next(ri->sc->mp, pag))) { error = xrep_dinode_count_ag_rmaps(ri, pag); if (error) { @@ -888,6 +943,85 @@ xrep_dinode_bad_bmbt_fork( return false; } +/* Return true if this rmap-format ifork looks like garbage. 
*/ +STATIC bool +xrep_dinode_bad_rtrmapbt_fork( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + unsigned int dfork_size) +{ + struct xfs_rtrmap_root *dfp; + unsigned int nrecs; + unsigned int level; + + if (dfork_size < sizeof(struct xfs_rtrmap_root)) + return true; + + dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + nrecs = be16_to_cpu(dfp->bb_numrecs); + level = be16_to_cpu(dfp->bb_level); + + if (level > sc->mp->m_rtrmap_maxlevels) + return true; + if (xfs_rtrmap_droot_space_calc(level, nrecs) > dfork_size) + return true; + if (level > 0 && nrecs == 0) + return true; + + return false; +} + +/* Return true if this refcount-format ifork looks like garbage. */ +STATIC bool +xrep_dinode_bad_rtrefcountbt_fork( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + unsigned int dfork_size) +{ + struct xfs_rtrefcount_root *dfp; + unsigned int nrecs; + unsigned int level; + + if (dfork_size < sizeof(struct xfs_rtrefcount_root)) + return true; + + dfp = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + nrecs = be16_to_cpu(dfp->bb_numrecs); + level = be16_to_cpu(dfp->bb_level); + + if (level > sc->mp->m_rtrefc_maxlevels) + return true; + if (xfs_rtrefcount_droot_space_calc(level, nrecs) > dfork_size) + return true; + if (level > 0 && nrecs == 0) + return true; + + return false; +} + +/* Check a metadata-btree fork. */ +STATIC bool +xrep_dinode_bad_metabt_fork( + struct xfs_scrub *sc, + struct xfs_dinode *dip, + unsigned int dfork_size, + int whichfork) +{ + if (whichfork != XFS_DATA_FORK) + return true; + + switch (be16_to_cpu(dip->di_metatype)) { + case XFS_METAFILE_RTRMAP: + return xrep_dinode_bad_rtrmapbt_fork(sc, dip, dfork_size); + case XFS_METAFILE_RTREFCOUNT: + return xrep_dinode_bad_rtrefcountbt_fork(sc, dip, dfork_size); + default: + return true; + } + + return false; +} + /* * Check the data fork for things that will fail the ifork verifiers or the * ifork formatters. @@ -968,6 +1102,11 @@ xrep_dinode_check_dfork( XFS_DATA_FORK)) return true; break; + case XFS_DINODE_FMT_META_BTREE: + if (xrep_dinode_bad_metabt_fork(sc, dip, dfork_size, + XFS_DATA_FORK)) + return true; + break; default: return true; } @@ -1088,6 +1227,11 @@ xrep_dinode_check_afork( XFS_ATTR_FORK)) return true; break; + case XFS_DINODE_FMT_META_BTREE: + if (xrep_dinode_bad_metabt_fork(sc, dip, afork_size, + XFS_ATTR_FORK)) + return true; + break; default: return true; } @@ -1135,6 +1279,8 @@ xrep_dinode_ensure_forkoff( uint16_t mode) { struct xfs_bmdr_block *bmdr; + struct xfs_rtrmap_root *rmdr; + struct xfs_rtrefcount_root *rcdr; struct xfs_scrub *sc = ri->sc; xfs_extnum_t attr_extents, data_extents; size_t bmdr_minsz = xfs_bmdr_space_calc(1); @@ -1241,6 +1387,21 @@ xrep_dinode_ensure_forkoff( bmdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK); dfork_min = xfs_bmap_broot_space(sc->mp, bmdr); break; + case XFS_DINODE_FMT_META_BTREE: + switch (be16_to_cpu(dip->di_metatype)) { + case XFS_METAFILE_RTRMAP: + rmdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + dfork_min = xfs_rtrmap_broot_space(sc->mp, rmdr); + break; + case XFS_METAFILE_RTREFCOUNT: + rcdr = XFS_DFORK_PTR(dip, XFS_DATA_FORK); + dfork_min = xfs_rtrefcount_broot_space(sc->mp, rcdr); + break; + default: + dfork_min = 0; + break; + } + break; default: dfork_min = 0; break; @@ -1500,8 +1661,7 @@ xrep_inode_blockcounts( trace_xrep_inode_blockcounts(sc); /* Set data fork counters from the data fork mappings. 
*/ - error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_DATA_FORK, - &nextents, &count); + error = xchk_inode_count_blocks(sc, XFS_DATA_FORK, &nextents, &count); if (error) return error; if (xfs_is_reflink_inode(sc->ip)) { @@ -1525,8 +1685,8 @@ xrep_inode_blockcounts( /* Set attr fork counters from the attr fork mappings. */ ifp = xfs_ifork_ptr(sc->ip, XFS_ATTR_FORK); if (ifp) { - error = xfs_bmap_count_blocks(sc->tp, sc->ip, XFS_ATTR_FORK, - &nextents, &acount); + error = xchk_inode_count_blocks(sc, XFS_ATTR_FORK, &nextents, + &acount); if (error) return error; if (count >= sc->mp->m_sb.sb_dblocks) @@ -1664,10 +1824,6 @@ xrep_inode_flags( /* DAX only applies to files and dirs. */ if (!(S_ISREG(mode) || S_ISDIR(mode))) sc->ip->i_diflags2 &= ~XFS_DIFLAG2_DAX; - - /* No reflink files on the realtime device. */ - if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) - sc->ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; } /* @@ -1783,6 +1939,20 @@ xrep_inode_pptr( sizeof(struct xfs_attr_sf_hdr), true); } +/* Fix COW extent size hint problems. */ +STATIC void +xrep_inode_cowextsize( + struct xfs_scrub *sc) +{ + /* Fix misaligned CoW extent size hints on a directory. */ + if ((sc->ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + (sc->ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) && + sc->ip->i_extsize % sc->mp->m_sb.sb_rextsize > 0) { + sc->ip->i_cowextsize = 0; + sc->ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE; + } +} + /* Fix any irregularities in an inode that the verifiers don't catch. */ STATIC int xrep_inode_problems( @@ -1806,6 +1976,7 @@ xrep_inode_problems( if (S_ISDIR(VFS_I(sc->ip)->i_mode)) xrep_inode_dir_size(sc); xrep_inode_extsize(sc); + xrep_inode_cowextsize(sc); trace_xrep_inode_fixed(sc); xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE); diff --git a/fs/xfs/scrub/metapath.c b/fs/xfs/scrub/metapath.c index c678cba1ffc3..e21c16fbd15d 100644 --- a/fs/xfs/scrub/metapath.c +++ b/fs/xfs/scrub/metapath.c @@ -21,6 +21,8 @@ #include "xfs_trans_space.h" #include "xfs_attr.h" #include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -246,6 +248,10 @@ xchk_setup_metapath( return xchk_setup_metapath_dqinode(sc, XFS_DQTYPE_GROUP); case XFS_SCRUB_METAPATH_PRJQUOTA: return xchk_setup_metapath_dqinode(sc, XFS_DQTYPE_PROJ); + case XFS_SCRUB_METAPATH_RTRMAPBT: + return xchk_setup_metapath_rtginode(sc, XFS_RTGI_RMAP); + case XFS_SCRUB_METAPATH_RTREFCOUNTBT: + return xchk_setup_metapath_rtginode(sc, XFS_RTGI_REFCOUNT); default: return -ENOENT; } diff --git a/fs/xfs/scrub/newbt.c b/fs/xfs/scrub/newbt.c index 70af27d98734..ac38f5843090 100644 --- a/fs/xfs/scrub/newbt.c +++ b/fs/xfs/scrub/newbt.c @@ -19,6 +19,8 @@ #include "xfs_rmap.h" #include "xfs_ag.h" #include "xfs_defer.h" +#include "xfs_metafile.h" +#include "xfs_quota.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -121,6 +123,43 @@ xrep_newbt_init_inode( } /* + * Initialize accounting resources for staging a new metadata inode btree. + * If the metadata file has a space reservation, the caller must adjust that + * reservation when committing the new ondisk btree. 
+ */ +int +xrep_newbt_init_metadir_inode( + struct xrep_newbt *xnr, + struct xfs_scrub *sc) +{ + struct xfs_owner_info oinfo; + struct xfs_ifork *ifp; + + ASSERT(xfs_is_metadir_inode(sc->ip)); + + xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK); + + ifp = kmem_cache_zalloc(xfs_ifork_cache, XCHK_GFP_FLAGS); + if (!ifp) + return -ENOMEM; + + /* + * Allocate new metadir btree blocks with XFS_AG_RESV_NONE because the + * inode metadata space reservations can only account allocated space + * to the i_nblocks. We do not want to change the inode core fields + * until we're ready to commit the new tree, so we allocate the blocks + * as if they were regular file blocks. This exposes us to a higher + * risk of the repair being cancelled due to ENOSPC. + */ + xrep_newbt_init_ag(xnr, sc, &oinfo, + XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino), + XFS_AG_RESV_NONE); + xnr->ifake.if_fork = ifp; + xnr->ifake.if_fork_size = xfs_inode_fork_size(sc->ip, XFS_DATA_FORK); + return 0; +} + +/* * Initialize accounting resources for staging a new btree. Callers are * expected to add their own reservations (and clean them up) manually. */ @@ -224,6 +263,7 @@ xrep_newbt_alloc_ag_blocks( int error = 0; ASSERT(sc->sa.pag != NULL); + ASSERT(xnr->resv != XFS_AG_RESV_METAFILE); while (nr_blocks > 0) { struct xfs_alloc_arg args = { @@ -297,6 +337,8 @@ xrep_newbt_alloc_file_blocks( struct xfs_mount *mp = sc->mp; int error = 0; + ASSERT(xnr->resv != XFS_AG_RESV_METAFILE); + while (nr_blocks > 0) { struct xfs_alloc_arg args = { .tp = sc->tp, diff --git a/fs/xfs/scrub/newbt.h b/fs/xfs/scrub/newbt.h index 3d804d31af24..5ce785599287 100644 --- a/fs/xfs/scrub/newbt.h +++ b/fs/xfs/scrub/newbt.h @@ -63,6 +63,7 @@ void xrep_newbt_init_ag(struct xrep_newbt *xnr, struct xfs_scrub *sc, enum xfs_ag_resv_type resv); int xrep_newbt_init_inode(struct xrep_newbt *xnr, struct xfs_scrub *sc, int whichfork, const struct xfs_owner_info *oinfo); +int xrep_newbt_init_metadir_inode(struct xrep_newbt *xnr, struct xfs_scrub *sc); int xrep_newbt_alloc_blocks(struct xrep_newbt *xnr, uint64_t nr_blocks); int xrep_newbt_add_extent(struct xrep_newbt *xnr, struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len); diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c index 183d531875ea..58d6d4ed2853 100644 --- a/fs/xfs/scrub/quota.c +++ b/fs/xfs/scrub/quota.c @@ -212,12 +212,18 @@ xchk_quota_item( if (mp->m_sb.sb_dblocks < dq->q_blk.count) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); + if (mp->m_sb.sb_rblocks < dq->q_rtb.count) + xchk_fblock_set_warning(sc, XFS_DATA_FORK, + offset); } else { if (mp->m_sb.sb_dblocks < dq->q_blk.count) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); + if (mp->m_sb.sb_rblocks < dq->q_rtb.count) + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, + offset); } - if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks) + if (dq->q_ino.count > fs_icount) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); /* diff --git a/fs/xfs/scrub/quota_repair.c b/fs/xfs/scrub/quota_repair.c index cd51f10f2920..8f4c8d41f308 100644 --- a/fs/xfs/scrub/quota_repair.c +++ b/fs/xfs/scrub/quota_repair.c @@ -233,7 +233,7 @@ xrep_quota_item( rqi->need_quotacheck = true; dirty = true; } - if (dq->q_rtb.count > mp->m_sb.sb_rblocks) { + if (!xfs_has_reflink(mp) && dq->q_rtb.count > mp->m_sb.sb_rblocks) { dq->q_rtb.reserved -= dq->q_rtb.count; dq->q_rtb.reserved += mp->m_sb.sb_rblocks; dq->q_rtb.count = mp->m_sb.sb_rblocks; diff --git a/fs/xfs/scrub/reap.c b/fs/xfs/scrub/reap.c index 08230952053b..b32fb233cf84 100644 
--- a/fs/xfs/scrub/reap.c +++ b/fs/xfs/scrub/reap.c @@ -33,6 +33,9 @@ #include "xfs_attr.h" #include "xfs_attr_remote.h" #include "xfs_defer.h" +#include "xfs_metafile.h" +#include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -40,6 +43,7 @@ #include "scrub/bitmap.h" #include "scrub/agb_bitmap.h" #include "scrub/fsb_bitmap.h" +#include "scrub/rtb_bitmap.h" #include "scrub/reap.h" /* @@ -310,7 +314,7 @@ xreap_agextent_binval( } out: - trace_xreap_agextent_binval(sc->sa.pag, agbno, *aglenp); + trace_xreap_agextent_binval(pag_group(sc->sa.pag), agbno, *aglenp); } /* @@ -369,7 +373,8 @@ xreap_agextent_select( out_found: *aglenp = len; - trace_xreap_agextent_select(sc->sa.pag, agbno, len, *crosslinked); + trace_xreap_agextent_select(pag_group(sc->sa.pag), agbno, len, + *crosslinked); out_cur: xfs_btree_del_cursor(cur, error); return error; @@ -390,6 +395,8 @@ xreap_agextent_iter( xfs_fsblock_t fsbno; int error = 0; + ASSERT(rs->resv != XFS_AG_RESV_METAFILE); + fsbno = xfs_agbno_to_fsb(sc->sa.pag, agbno); /* @@ -406,7 +413,8 @@ xreap_agextent_iter( * to run xfs_repair. */ if (crosslinked) { - trace_xreap_dispose_unmap_extent(sc->sa.pag, agbno, *aglenp); + trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag), agbno, + *aglenp); rs->force_roll = true; @@ -416,7 +424,8 @@ xreap_agextent_iter( * records from the refcountbt, which will remove the * rmap record as well. */ - xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp); + xfs_refcount_free_cow_extent(sc->tp, false, fsbno, + *aglenp); return 0; } @@ -424,7 +433,7 @@ xreap_agextent_iter( *aglenp, rs->oinfo); } - trace_xreap_dispose_free_extent(sc->sa.pag, agbno, *aglenp); + trace_xreap_dispose_free_extent(pag_group(sc->sa.pag), agbno, *aglenp); /* * Invalidate as many buffers as we can, starting at agbno. If this @@ -448,7 +457,7 @@ xreap_agextent_iter( if (rs->oinfo == &XFS_RMAP_OINFO_COW) { ASSERT(rs->resv == XFS_AG_RESV_NONE); - xfs_refcount_free_cow_extent(sc->tp, fsbno, *aglenp); + xfs_refcount_free_cow_extent(sc->tp, false, fsbno, *aglenp); error = xfs_free_extent_later(sc->tp, fsbno, *aglenp, NULL, rs->resv, XFS_FREE_EXTENT_SKIP_DISCARD); if (error) @@ -675,6 +684,263 @@ xrep_reap_fsblocks( return 0; } +#ifdef CONFIG_XFS_RT +/* + * Figure out the longest run of blocks that we can dispose of with a single + * call. Cross-linked blocks should have their reverse mappings removed, but + * single-owner extents can be freed. Units are rt blocks, not rt extents. + */ +STATIC int +xreap_rgextent_select( + struct xreap_state *rs, + xfs_rgblock_t rgbno, + xfs_rgblock_t rgbno_next, + bool *crosslinked, + xfs_extlen_t *rglenp) +{ + struct xfs_scrub *sc = rs->sc; + struct xfs_btree_cur *cur; + xfs_rgblock_t bno = rgbno + 1; + xfs_extlen_t len = 1; + int error; + + /* + * Determine if there are any other rmap records covering the first + * block of this extent. If so, the block is crosslinked. + */ + cur = xfs_rtrmapbt_init_cursor(sc->tp, sc->sr.rtg); + error = xfs_rmap_has_other_keys(cur, rgbno, 1, rs->oinfo, + crosslinked); + if (error) + goto out_cur; + + /* + * Figure out how many of the subsequent blocks have the same crosslink + * status. 
+ */ + while (bno < rgbno_next) { + bool also_crosslinked; + + error = xfs_rmap_has_other_keys(cur, bno, 1, rs->oinfo, + &also_crosslinked); + if (error) + goto out_cur; + + if (*crosslinked != also_crosslinked) + break; + + len++; + bno++; + } + + *rglenp = len; + trace_xreap_agextent_select(rtg_group(sc->sr.rtg), rgbno, len, + *crosslinked); +out_cur: + xfs_btree_del_cursor(cur, error); + return error; +} + +/* + * Dispose of as much of the beginning of this rtgroup extent as possible. + * The number of blocks disposed of will be returned in @rglenp. + */ +STATIC int +xreap_rgextent_iter( + struct xreap_state *rs, + xfs_rgblock_t rgbno, + xfs_extlen_t *rglenp, + bool crosslinked) +{ + struct xfs_scrub *sc = rs->sc; + xfs_rtblock_t rtbno; + int error; + + /* + * The only caller so far is CoW fork repair, so we only know how to + * unlink or free CoW staging extents. Here we don't have to worry + * about invalidating buffers! + */ + if (rs->oinfo != &XFS_RMAP_OINFO_COW) { + ASSERT(rs->oinfo == &XFS_RMAP_OINFO_COW); + return -EFSCORRUPTED; + } + ASSERT(rs->resv == XFS_AG_RESV_NONE); + + rtbno = xfs_rgbno_to_rtb(sc->sr.rtg, rgbno); + + /* + * If there are other rmappings, this block is cross linked and must + * not be freed. Remove the forward and reverse mapping and move on. + */ + if (crosslinked) { + trace_xreap_dispose_unmap_extent(rtg_group(sc->sr.rtg), rgbno, + *rglenp); + + xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp); + rs->deferred++; + return 0; + } + + trace_xreap_dispose_free_extent(rtg_group(sc->sr.rtg), rgbno, *rglenp); + + /* + * The CoW staging extent is not crosslinked. Use deferred work items + * to remove the refcountbt records (which removes the rmap records) + * and free the extent. We're not worried about the system going down + * here because log recovery walks the refcount btree to clean out the + * CoW staging extents. + */ + xfs_refcount_free_cow_extent(sc->tp, true, rtbno, *rglenp); + error = xfs_free_extent_later(sc->tp, rtbno, *rglenp, NULL, + rs->resv, + XFS_FREE_EXTENT_REALTIME | + XFS_FREE_EXTENT_SKIP_DISCARD); + if (error) + return error; + + rs->deferred++; + return 0; +} + +#define XREAP_RTGLOCK_ALL (XFS_RTGLOCK_BITMAP | \ + XFS_RTGLOCK_RMAP | \ + XFS_RTGLOCK_REFCOUNT) + +/* + * Break a rt file metadata extent into sub-extents by fate (crosslinked, not + * crosslinked), and dispose of each sub-extent separately. The extent must + * be aligned to a realtime extent. + */ +STATIC int +xreap_rtmeta_extent( + uint64_t rtbno, + uint64_t len, + void *priv) +{ + struct xreap_state *rs = priv; + struct xfs_scrub *sc = rs->sc; + xfs_rgblock_t rgbno = xfs_rtb_to_rgbno(sc->mp, rtbno); + xfs_rgblock_t rgbno_next = rgbno + len; + int error = 0; + + ASSERT(sc->ip != NULL); + ASSERT(!sc->sr.rtg); + + /* + * We're reaping blocks after repairing file metadata, which means that + * we have to init the xchk_ag structure ourselves. 
+ */ + sc->sr.rtg = xfs_rtgroup_get(sc->mp, xfs_rtb_to_rgno(sc->mp, rtbno)); + if (!sc->sr.rtg) + return -EFSCORRUPTED; + + xfs_rtgroup_lock(sc->sr.rtg, XREAP_RTGLOCK_ALL); + + while (rgbno < rgbno_next) { + xfs_extlen_t rglen; + bool crosslinked; + + error = xreap_rgextent_select(rs, rgbno, rgbno_next, + &crosslinked, &rglen); + if (error) + goto out_unlock; + + error = xreap_rgextent_iter(rs, rgbno, &rglen, crosslinked); + if (error) + goto out_unlock; + + if (xreap_want_defer_finish(rs)) { + error = xfs_defer_finish(&sc->tp); + if (error) + goto out_unlock; + xreap_defer_finish_reset(rs); + } else if (xreap_want_roll(rs)) { + error = xfs_trans_roll_inode(&sc->tp, sc->ip); + if (error) + goto out_unlock; + xreap_reset(rs); + } + + rgbno += rglen; + } + +out_unlock: + xfs_rtgroup_unlock(sc->sr.rtg, XREAP_RTGLOCK_ALL); + xfs_rtgroup_put(sc->sr.rtg); + sc->sr.rtg = NULL; + return error; +} + +/* + * Dispose of every block of every rt metadata extent in the bitmap. + * Do not use this to dispose of the mappings in an ondisk inode fork. + */ +int +xrep_reap_rtblocks( + struct xfs_scrub *sc, + struct xrtb_bitmap *bitmap, + const struct xfs_owner_info *oinfo) +{ + struct xreap_state rs = { + .sc = sc, + .oinfo = oinfo, + .resv = XFS_AG_RESV_NONE, + }; + int error; + + ASSERT(xfs_has_rmapbt(sc->mp)); + ASSERT(sc->ip != NULL); + + error = xrtb_bitmap_walk(bitmap, xreap_rtmeta_extent, &rs); + if (error) + return error; + + if (xreap_dirty(&rs)) + return xrep_defer_finish(sc); + + return 0; +} +#endif /* CONFIG_XFS_RT */ + +/* + * Dispose of every block of an old metadata btree that used to be rooted in a + * metadata directory file. + */ +int +xrep_reap_metadir_fsblocks( + struct xfs_scrub *sc, + struct xfsb_bitmap *bitmap) +{ + /* + * Reap old metadir btree blocks with XFS_AG_RESV_NONE because the old + * blocks are no longer mapped by the inode, and inode metadata space + * reservations can only account freed space to the i_nblocks. + */ + struct xfs_owner_info oinfo; + struct xreap_state rs = { + .sc = sc, + .oinfo = &oinfo, + .resv = XFS_AG_RESV_NONE, + }; + int error; + + ASSERT(xfs_has_rmapbt(sc->mp)); + ASSERT(sc->ip != NULL); + ASSERT(xfs_is_metadir_inode(sc->ip)); + + xfs_rmap_ino_bmbt_owner(&oinfo, sc->ip->i_ino, XFS_DATA_FORK); + + error = xfsb_bitmap_walk(bitmap, xreap_fsmeta_extent, &rs); + if (error) + return error; + + if (xreap_dirty(&rs)) + return xrep_defer_finish(sc); + + return 0; +} + /* * Metadata files are not supposed to share blocks with anything else. * If blocks are shared, we remove the reverse mapping (thus reducing the @@ -729,7 +995,8 @@ xreap_bmapi_select( } imap->br_blockcount = len; - trace_xreap_bmapi_select(sc->sa.pag, agbno, len, *crosslinked); + trace_xreap_bmapi_select(pag_group(sc->sa.pag), agbno, len, + *crosslinked); out_cur: xfs_btree_del_cursor(cur, error); return error; @@ -868,7 +1135,8 @@ xreap_bmapi_binval( } out: - trace_xreap_bmapi_binval(sc->sa.pag, agbno, imap->br_blockcount); + trace_xreap_bmapi_binval(pag_group(sc->sa.pag), agbno, + imap->br_blockcount); return 0; } @@ -895,7 +1163,7 @@ xrep_reap_bmapi_iter( * anybody else who thinks they own the block, even though that * runs the risk of stale buffer warnings in the future. */ - trace_xreap_dispose_unmap_extent(sc->sa.pag, + trace_xreap_dispose_unmap_extent(pag_group(sc->sa.pag), XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock), imap->br_blockcount); @@ -918,7 +1186,7 @@ xrep_reap_bmapi_iter( * by a block starting before the first block of the extent but overlap * anyway. 
*/ - trace_xreap_dispose_free_extent(sc->sa.pag, + trace_xreap_dispose_free_extent(pag_group(sc->sa.pag), XFS_FSB_TO_AGBNO(sc->mp, imap->br_startblock), imap->br_blockcount); diff --git a/fs/xfs/scrub/reap.h b/fs/xfs/scrub/reap.h index 3f2f1775e29d..4c8f62701fb3 100644 --- a/fs/xfs/scrub/reap.h +++ b/fs/xfs/scrub/reap.h @@ -14,6 +14,15 @@ int xrep_reap_agblocks(struct xfs_scrub *sc, struct xagb_bitmap *bitmap, int xrep_reap_fsblocks(struct xfs_scrub *sc, struct xfsb_bitmap *bitmap, const struct xfs_owner_info *oinfo); int xrep_reap_ifork(struct xfs_scrub *sc, struct xfs_inode *ip, int whichfork); +int xrep_reap_metadir_fsblocks(struct xfs_scrub *sc, + struct xfsb_bitmap *bitmap); + +#ifdef CONFIG_XFS_RT +int xrep_reap_rtblocks(struct xfs_scrub *sc, struct xrtb_bitmap *bitmap, + const struct xfs_owner_info *oinfo); +#else +# define xrep_reap_rtblocks(...) (-EOPNOTSUPP) +#endif /* CONFIG_XFS_RT */ /* Buffer cache scan context. */ struct xrep_bufscan { diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c index 1c5e45cc6419..d46528023015 100644 --- a/fs/xfs/scrub/refcount.c +++ b/fs/xfs/scrub/refcount.c @@ -421,7 +421,7 @@ xchk_refcount_mergeable( if (r1->rc_refcount != r2->rc_refcount) return false; if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount > - MAXREFCEXTLEN) + XFS_REFC_LEN_MAX) return false; return true; diff --git a/fs/xfs/scrub/refcount_repair.c b/fs/xfs/scrub/refcount_repair.c index 4e572b81c986..9c8cb5332da0 100644 --- a/fs/xfs/scrub/refcount_repair.c +++ b/fs/xfs/scrub/refcount_repair.c @@ -183,13 +183,13 @@ xrep_refc_stash( if (xchk_should_terminate(sc, &error)) return error; - irec.rc_refcount = min_t(uint64_t, MAXREFCOUNT, refcount); + irec.rc_refcount = min_t(uint64_t, XFS_REFC_REFCOUNT_MAX, refcount); error = xrep_refc_check_ext(rr->sc, &irec); if (error) return error; - trace_xrep_refc_found(sc->sa.pag, &irec); + trace_xrep_refc_found(pag_group(sc->sa.pag), &irec); return xfarray_append(rr->refcount_records, &irec); } @@ -422,7 +422,7 @@ xrep_refc_find_refcounts( /* * Set up a bag to store all the rmap records that we're tracking to * generate a reference count record. If the size of the bag exceeds - * MAXREFCOUNT, we clamp rc_refcount. + * XFS_REFC_REFCOUNT_MAX, we clamp rc_refcount. */ error = rcbag_init(sc->mp, sc->xmbtp, &rcstack); if (error) diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 91c8bc055a4f..3b5288d3ef4e 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -37,6 +37,12 @@ #include "xfs_da_btree.h" #include "xfs_attr.h" #include "xfs_dir2.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtbitmap.h" +#include "xfs_rtgroup.h" +#include "xfs_rtalloc.h" +#include "xfs_metafile.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" @@ -62,6 +68,7 @@ xrep_attempt( trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error); xchk_ag_btcur_free(&sc->sa); + xchk_rtgroup_btcur_free(&sc->sr); /* Repair whatever's broken. */ ASSERT(sc->ops->repair); @@ -378,6 +385,41 @@ xrep_calc_ag_resblks( return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz)); } +#ifdef CONFIG_XFS_RT +/* + * Figure out how many blocks to reserve for a rtgroup repair. We calculate + * the worst case estimate for the number of blocks we'd need to rebuild one of + * any type of per-rtgroup btree. 
+ */ +xfs_extlen_t +xrep_calc_rtgroup_resblks( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_scrub_metadata *sm = sc->sm; + uint64_t usedlen; + xfs_extlen_t rmapbt_sz = 0; + + if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) + return 0; + if (!xfs_has_rtgroups(mp)) { + ASSERT(0); + return -EFSCORRUPTED; + } + + usedlen = xfs_rtbxlen_to_blen(mp, xfs_rtgroup_extents(mp, sm->sm_agno)); + ASSERT(usedlen <= XFS_MAX_RGBLOCKS); + + if (xfs_has_rmapbt(mp)) + rmapbt_sz = xfs_rtrmapbt_calc_size(mp, usedlen); + + trace_xrep_calc_rtgroup_resblks_btsize(mp, sm->sm_agno, usedlen, + rmapbt_sz); + + return rmapbt_sz; +} +#endif /* CONFIG_XFS_RT */ + /* * Reconstructing per-AG Btrees * @@ -954,6 +996,27 @@ xrep_ag_init( } #ifdef CONFIG_XFS_RT +/* Initialize all the btree cursors for a RT repair. */ +void +xrep_rtgroup_btcur_init( + struct xfs_scrub *sc, + struct xchk_rt *sr) +{ + struct xfs_mount *mp = sc->mp; + + ASSERT(sr->rtg != NULL); + + if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTRMAPBT && + (sr->rtlock_flags & XFS_RTGLOCK_RMAP) && + xfs_has_rtrmapbt(mp)) + sr->rmap_cur = xfs_rtrmapbt_init_cursor(sc->tp, sr->rtg); + + if (sc->sm->sm_type != XFS_SCRUB_TYPE_RTREFCBT && + (sr->rtlock_flags & XFS_RTGLOCK_REFCOUNT) && + xfs_has_rtreflink(mp)) + sr->refc_cur = xfs_rtrefcountbt_init_cursor(sc->tp, sr->rtg); +} + /* * Given a reference to a rtgroup structure, lock rtgroup btree inodes and * create btree cursors. Must only be called to repair a regular rt file. @@ -972,6 +1035,33 @@ xrep_rtgroup_init( /* Grab our own passive reference from the caller's ref. */ sr->rtg = xfs_rtgroup_hold(rtg); + xrep_rtgroup_btcur_init(sc, sr); + return 0; +} + +/* Ensure that all rt blocks in the given range are not marked free. */ +int +xrep_require_rtext_inuse( + struct xfs_scrub *sc, + xfs_rgblock_t rgbno, + xfs_filblks_t len) +{ + struct xfs_mount *mp = sc->mp; + xfs_rtxnum_t startrtx; + xfs_rtxnum_t endrtx; + bool is_free = false; + int error; + + startrtx = xfs_rgbno_to_rtx(mp, rgbno); + endrtx = xfs_rgbno_to_rtx(mp, rgbno + len - 1); + + error = xfs_rtalloc_extent_is_free(sc->sr.rtg, sc->tp, startrtx, + endrtx - startrtx + 1, &is_free); + if (error) + return error; + if (is_free) + return -EFSCORRUPTED; + return 0; } #endif /* CONFIG_XFS_RT */ @@ -1237,3 +1327,110 @@ xrep_buf_verify_struct( return fa == NULL; } + +/* Check the sanity of a rmap record for a metadata btree inode. */ +int +xrep_check_ino_btree_mapping( + struct xfs_scrub *sc, + const struct xfs_rmap_irec *rec) +{ + enum xbtree_recpacking outcome; + int error; + + /* + * Metadata btree inodes never have extended attributes, and all blocks + * should have the bmbt block flag set. + */ + if ((rec->rm_flags & XFS_RMAP_ATTR_FORK) || + !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK)) + return -EFSCORRUPTED; + + /* Make sure the block is within the AG. */ + if (!xfs_verify_agbext(sc->sa.pag, rec->rm_startblock, + rec->rm_blockcount)) + return -EFSCORRUPTED; + + /* Make sure this isn't free space. */ + error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock, + rec->rm_blockcount, &outcome); + if (error) + return error; + if (outcome != XBTREE_RECPACKING_EMPTY) + return -EFSCORRUPTED; + + return 0; +} + +/* + * Reset the block count of the inode being repaired, and adjust the dquot + * block usage to match. The inode must not have an xattr fork. 
+ */ +void +xrep_inode_set_nblocks( + struct xfs_scrub *sc, + int64_t new_blocks) +{ + int64_t delta = + new_blocks - sc->ip->i_nblocks; + + sc->ip->i_nblocks = new_blocks; + + xfs_trans_log_inode(sc->tp, sc->ip, XFS_ILOG_CORE); + if (delta != 0) + xfs_trans_mod_dquot_byino(sc->tp, sc->ip, XFS_TRANS_DQ_BCOUNT, + delta); +} + +/* Reset the block reservation for a metadata inode. */ +int +xrep_reset_metafile_resv( + struct xfs_scrub *sc) +{ + struct xfs_inode *ip = sc->ip; + int64_t delta; + int error; + + delta = ip->i_nblocks + ip->i_delayed_blks - ip->i_meta_resv_asked; + if (delta == 0) + return 0; + + /* + * Too many blocks have been reserved, transfer some from the incore + * reservation back to the filesystem. + */ + if (delta > 0) { + int64_t give_back; + + give_back = min_t(uint64_t, delta, ip->i_delayed_blks); + if (give_back > 0) { + xfs_mod_delalloc(ip, 0, -give_back); + xfs_add_fdblocks(ip->i_mount, give_back); + ip->i_delayed_blks -= give_back; + } + + return 0; + } + + /* + * Not enough reservation; try to take some blocks from the filesystem + * to the metadata inode. @delta is negative here, so invert the sign. + */ + delta = -delta; + error = xfs_dec_fdblocks(sc->mp, delta, true); + while (error == -ENOSPC) { + delta--; + if (delta == 0) { + xfs_warn(sc->mp, +"Insufficient free space to reset space reservation for inode 0x%llx after repair.", + ip->i_ino); + return 0; + } + error = xfs_dec_fdblocks(sc->mp, delta, true); + } + if (error) + return error; + + xfs_mod_delalloc(ip, 0, delta); + ip->i_delayed_blks += delta; + return 0; +} diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index b649da1a93eb..823c00d1a502 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -50,7 +50,9 @@ xrep_trans_commit( struct xbitmap; struct xagb_bitmap; +struct xrgb_bitmap; struct xfsb_bitmap; +struct xrtb_bitmap; int xrep_fix_freelist(struct xfs_scrub *sc, int alloc_flags); @@ -97,6 +99,8 @@ int xrep_setup_parent(struct xfs_scrub *sc); int xrep_setup_nlinks(struct xfs_scrub *sc); int xrep_setup_symlink(struct xfs_scrub *sc, unsigned int *resblks); int xrep_setup_dirtree(struct xfs_scrub *sc); +int xrep_setup_rtrmapbt(struct xfs_scrub *sc); +int xrep_setup_rtrefcountbt(struct xfs_scrub *sc); /* Repair setup functions */ int xrep_setup_ag_allocbt(struct xfs_scrub *sc); @@ -110,10 +114,18 @@ int xrep_ag_init(struct xfs_scrub *sc, struct xfs_perag *pag, #ifdef CONFIG_XFS_RT int xrep_rtgroup_init(struct xfs_scrub *sc, struct xfs_rtgroup *rtg, struct xchk_rt *sr, unsigned int rtglock_flags); +void xrep_rtgroup_btcur_init(struct xfs_scrub *sc, struct xchk_rt *sr); +int xrep_require_rtext_inuse(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_filblks_t len); +xfs_extlen_t xrep_calc_rtgroup_resblks(struct xfs_scrub *sc); #else # define xrep_rtgroup_init(sc, rtg, sr, lockflags) (-ENOSYS) +# define xrep_calc_rtgroup_resblks(sc) (0) #endif /* CONFIG_XFS_RT */ +int xrep_check_ino_btree_mapping(struct xfs_scrub *sc, + const struct xfs_rmap_irec *rec); + /* Metadata revalidators */ int xrep_revalidate_allocbt(struct xfs_scrub *sc); @@ -147,10 +159,14 @@ int xrep_metapath(struct xfs_scrub *sc); int xrep_rtbitmap(struct xfs_scrub *sc); int xrep_rtsummary(struct xfs_scrub *sc); int xrep_rgsuperblock(struct xfs_scrub *sc); +int xrep_rtrmapbt(struct xfs_scrub *sc); +int xrep_rtrefcountbt(struct xfs_scrub *sc); #else # define xrep_rtbitmap xrep_notsupported # define xrep_rtsummary xrep_notsupported # define xrep_rgsuperblock xrep_notsupported +# define xrep_rtrmapbt xrep_notsupported +# 
define xrep_rtrefcountbt xrep_notsupported #endif /* CONFIG_XFS_RT */ #ifdef CONFIG_XFS_QUOTA @@ -169,6 +185,8 @@ int xrep_trans_alloc_hook_dummy(struct xfs_mount *mp, void **cookiep, void xrep_trans_cancel_hook_dummy(void **cookiep, struct xfs_trans *tp); bool xrep_buf_verify_struct(struct xfs_buf *bp, const struct xfs_buf_ops *ops); +void xrep_inode_set_nblocks(struct xfs_scrub *sc, int64_t new_blocks); +int xrep_reset_metafile_resv(struct xfs_scrub *sc); #else @@ -192,6 +210,8 @@ xrep_calc_ag_resblks( return 0; } +#define xrep_calc_rtgroup_resblks xrep_calc_ag_resblks + static inline int xrep_reset_perag_resv( struct xfs_scrub *sc) @@ -219,6 +239,8 @@ xrep_setup_nothing( #define xrep_setup_nlinks xrep_setup_nothing #define xrep_setup_dirtree xrep_setup_nothing #define xrep_setup_metapath xrep_setup_nothing +#define xrep_setup_rtrmapbt xrep_setup_nothing +#define xrep_setup_rtrefcountbt xrep_setup_nothing #define xrep_setup_inode(sc, imap) ((void)0) @@ -256,6 +278,8 @@ static inline int xrep_setup_symlink(struct xfs_scrub *sc, unsigned int *x) #define xrep_dirtree xrep_notsupported #define xrep_metapath xrep_notsupported #define xrep_rgsuperblock xrep_notsupported +#define xrep_rtrmapbt xrep_notsupported +#define xrep_rtrefcountbt xrep_notsupported #endif /* CONFIG_XFS_ONLINE_REPAIR */ diff --git a/fs/xfs/scrub/rgb_bitmap.h b/fs/xfs/scrub/rgb_bitmap.h new file mode 100644 index 000000000000..4c3126b66dcb --- /dev/null +++ b/fs/xfs/scrub/rgb_bitmap.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2020-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#ifndef __XFS_SCRUB_RGB_BITMAP_H__ +#define __XFS_SCRUB_RGB_BITMAP_H__ + +/* Bitmaps, but for type-checked for xfs_rgblock_t */ + +struct xrgb_bitmap { + struct xbitmap32 rgbitmap; +}; + +static inline void xrgb_bitmap_init(struct xrgb_bitmap *bitmap) +{ + xbitmap32_init(&bitmap->rgbitmap); +} + +static inline void xrgb_bitmap_destroy(struct xrgb_bitmap *bitmap) +{ + xbitmap32_destroy(&bitmap->rgbitmap); +} + +static inline int xrgb_bitmap_set(struct xrgb_bitmap *bitmap, + xfs_rgblock_t start, xfs_extlen_t len) +{ + return xbitmap32_set(&bitmap->rgbitmap, start, len); +} + +static inline int xrgb_bitmap_walk(struct xrgb_bitmap *bitmap, + xbitmap32_walk_fn fn, void *priv) +{ + return xbitmap32_walk(&bitmap->rgbitmap, fn, priv); +} + +#endif /* __XFS_SCRUB_RGB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/rgsuper.c b/fs/xfs/scrub/rgsuper.c index 463b3573bb76..d189732d0e24 100644 --- a/fs/xfs/scrub/rgsuper.c +++ b/fs/xfs/scrub/rgsuper.c @@ -13,6 +13,7 @@ #include "xfs_log_format.h" #include "xfs_trans.h" #include "xfs_sb.h" +#include "xfs_rmap.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/repair.h" @@ -34,6 +35,7 @@ xchk_rgsuperblock_xref( return; xchk_xref_is_used_rt_space(sc, xfs_rgbno_to_rtb(sc->sr.rtg, 0), 1); + xchk_xref_is_only_rt_owned_by(sc, 0, 1, &XFS_RMAP_OINFO_FS); } int @@ -61,7 +63,9 @@ xchk_rgsuperblock( if (!xchk_xref_process_error(sc, 0, 0, &error)) return error; - xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP_SHARED); + error = xchk_rtgroup_lock(sc, &sc->sr, XFS_RTGLOCK_BITMAP_SHARED); + if (error) + return error; /* * Since we already validated the rt superblock at mount time, we don't diff --git a/fs/xfs/scrub/rmap_repair.c b/fs/xfs/scrub/rmap_repair.c index a0a227d183d2..f5f73078ffe2 100644 --- a/fs/xfs/scrub/rmap_repair.c +++ b/fs/xfs/scrub/rmap_repair.c @@ -31,6 +31,9 @@ #include "xfs_refcount.h" #include "xfs_refcount_btree.h" 
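The new rgb_bitmap.h header above (and its 64-bit counterpart rtb_bitmap.h later in this series) is deliberately thin: it wraps the generic xbitmap32/xbitmap64 code so that callers deal in xfs_rgblock_t or xfs_rtblock_t values rather than bare integers. A rough userspace sketch of the same wrapper pattern (hypothetical names, plain C, not the kernel API):

#include <stdint.h>
#include <stdio.h>

/* Generic bitmap keyed by plain integers (stand-in for xbitmap32). */
struct u32_bitmap { uint64_t words[4]; };

static void u32_bitmap_set(struct u32_bitmap *b, uint32_t start, uint32_t len)
{
	for (uint32_t i = start; i < start + len; i++)
		b->words[i / 64] |= 1ULL << (i % 64);
}

/* Typed wrapper: callers pass rt group block numbers, not bare ints. */
typedef uint32_t rgblock_t;
struct rgb_bitmap { struct u32_bitmap bits; };

static void rgb_bitmap_set(struct rgb_bitmap *b, rgblock_t start, uint32_t len)
{
	u32_bitmap_set(&b->bits, start, len);
}

int main(void)
{
	struct rgb_bitmap b = { { { 0 } } };

	rgb_bitmap_set(&b, 3, 4);	/* mark rtgroup blocks 3-6 */
	printf("word0 = %#llx\n", (unsigned long long)b.bits.words[0]);
	return 0;
}

The wrapper adds no behaviour of its own; its value is that mixing up block-number spaces becomes an obvious error at review time, and the distinct struct type keeps the two bitmap flavours from being interchanged by accident.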
#include "xfs_ag.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtgroup.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" @@ -499,6 +502,69 @@ xrep_rmap_scan_iext( return xrep_rmap_stash_accumulated(rf); } +static int +xrep_rmap_scan_meta_btree( + struct xrep_rmap_ifork *rf, + struct xfs_inode *ip) +{ + struct xfs_scrub *sc = rf->rr->sc; + struct xfs_rtgroup *rtg = NULL; + struct xfs_btree_cur *cur = NULL; + enum xfs_rtg_inodes type; + int error; + + if (rf->whichfork != XFS_DATA_FORK) + return -EFSCORRUPTED; + + switch (ip->i_metatype) { + case XFS_METAFILE_RTRMAP: + type = XFS_RTGI_RMAP; + break; + case XFS_METAFILE_RTREFCOUNT: + type = XFS_RTGI_REFCOUNT; + break; + default: + ASSERT(0); + return -EFSCORRUPTED; + } + + while ((rtg = xfs_rtgroup_next(sc->mp, rtg))) { + if (ip == rtg->rtg_inodes[type]) + goto found; + } + + /* + * We should never find an rt metadata btree inode that isn't + * associated with an rtgroup yet has ondisk blocks allocated to it. + */ + if (ip->i_nblocks) { + ASSERT(0); + return -EFSCORRUPTED; + } + + return 0; + +found: + switch (ip->i_metatype) { + case XFS_METAFILE_RTRMAP: + cur = xfs_rtrmapbt_init_cursor(sc->tp, rtg); + break; + case XFS_METAFILE_RTREFCOUNT: + cur = xfs_rtrefcountbt_init_cursor(sc->tp, rtg); + break; + default: + ASSERT(0); + error = -EFSCORRUPTED; + goto out_rtg; + } + + error = xrep_rmap_scan_iroot_btree(rf, cur); + xfs_btree_del_cursor(cur, error); +out_rtg: + xfs_rtgroup_rele(rtg); + return error; +} + /* Find all the extents from a given AG in an inode fork. */ STATIC int xrep_rmap_scan_ifork( @@ -512,14 +578,14 @@ xrep_rmap_scan_ifork( .whichfork = whichfork, }; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); + bool mappings_done; int error = 0; if (!ifp) return 0; - if (ifp->if_format == XFS_DINODE_FMT_BTREE) { - bool mappings_done; - + switch (ifp->if_format) { + case XFS_DINODE_FMT_BTREE: /* * Scan the bmap btree for data device mappings. This includes * the btree blocks themselves, even if this is a realtime @@ -528,15 +594,18 @@ xrep_rmap_scan_ifork( error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done); if (error || mappings_done) return error; - } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) { - return 0; + fallthrough; + case XFS_DINODE_FMT_EXTENTS: + /* Scan incore extent cache if this isn't a realtime file. */ + if (xfs_ifork_is_realtime(ip, whichfork)) + return 0; + + return xrep_rmap_scan_iext(&rf, ifp); + case XFS_DINODE_FMT_META_BTREE: + return xrep_rmap_scan_meta_btree(&rf, ip); } - /* Scan incore extent cache if this isn't a realtime file. */ - if (xfs_ifork_is_realtime(ip, whichfork)) - return 0; - - return xrep_rmap_scan_iext(&rf, ifp); + return 0; } /* @@ -1552,7 +1621,7 @@ xrep_rmapbt_live_update( if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo)) goto out_unlock; - trace_xrep_rmap_live_update(rr->sc->sa.pag, action, p); + trace_xrep_rmap_live_update(pag_group(rr->sc->sa.pag), action, p); error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp); if (error) diff --git a/fs/xfs/scrub/rtb_bitmap.h b/fs/xfs/scrub/rtb_bitmap.h new file mode 100644 index 000000000000..1313ef605511 --- /dev/null +++ b/fs/xfs/scrub/rtb_bitmap.h @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2022-2024 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong <djwong@kernel.org> + */ +#ifndef __XFS_SCRUB_RTB_BITMAP_H__ +#define __XFS_SCRUB_RTB_BITMAP_H__ + +/* Bitmaps, but for type-checked for xfs_rtblock_t */ + +struct xrtb_bitmap { + struct xbitmap64 rtbitmap; +}; + +static inline void xrtb_bitmap_init(struct xrtb_bitmap *bitmap) +{ + xbitmap64_init(&bitmap->rtbitmap); +} + +static inline void xrtb_bitmap_destroy(struct xrtb_bitmap *bitmap) +{ + xbitmap64_destroy(&bitmap->rtbitmap); +} + +static inline int xrtb_bitmap_set(struct xrtb_bitmap *bitmap, + xfs_rtblock_t start, xfs_filblks_t len) +{ + return xbitmap64_set(&bitmap->rtbitmap, start, len); +} + +static inline int xrtb_bitmap_walk(struct xrtb_bitmap *bitmap, + xbitmap64_walk_fn fn, void *priv) +{ + return xbitmap64_walk(&bitmap->rtbitmap, fn, priv); +} + +#endif /* __XFS_SCRUB_RTB_BITMAP_H__ */ diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c index 376a36fd9a9c..e8c776a34c1d 100644 --- a/fs/xfs/scrub/rtbitmap.c +++ b/fs/xfs/scrub/rtbitmap.c @@ -9,17 +9,24 @@ #include "xfs_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" +#include "xfs_btree.h" #include "xfs_log_format.h" #include "xfs_trans.h" #include "xfs_rtbitmap.h" #include "xfs_inode.h" #include "xfs_bmap.h" #include "xfs_bit.h" +#include "xfs_rtgroup.h" #include "xfs_sb.h" +#include "xfs_rmap.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_exchmaps.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/repair.h" +#include "scrub/tempexch.h" #include "scrub/rtbitmap.h" +#include "scrub/btree.h" /* Set us up with the realtime metadata locked. */ int @@ -30,10 +37,15 @@ xchk_setup_rtbitmap( struct xchk_rtbitmap *rtb; int error; - rtb = kzalloc(sizeof(struct xchk_rtbitmap), XCHK_GFP_FLAGS); + if (xchk_need_intent_drain(sc)) + xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN); + + rtb = kzalloc(struct_size(rtb, words, xchk_rtbitmap_wordcnt(sc)), + XCHK_GFP_FLAGS); if (!rtb) return -ENOMEM; sc->buf = rtb; + rtb->sc = sc; error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr); if (error) @@ -49,8 +61,7 @@ xchk_setup_rtbitmap( if (error) return error; - error = xchk_install_live_inode(sc, - sc->sr.rtg->rtg_inodes[XFS_RTGI_BITMAP]); + error = xchk_install_live_inode(sc, rtg_bitmap(sc->sr.rtg)); if (error) return error; @@ -58,12 +69,15 @@ xchk_setup_rtbitmap( if (error) return error; + error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL); + if (error) + return error; + /* * Now that we've locked the rtbitmap, we can't race with growfsrt * trying to expand the bitmap or change the size of the rt volume. * Hence it is safe to compute and check the geometry values. */ - xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP); if (mp->m_sb.sb_rblocks) { rtb->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks); rtb->rextslog = xfs_compute_rextslog(rtb->rextents); @@ -73,7 +87,32 @@ xchk_setup_rtbitmap( return 0; } -/* Realtime bitmap. */ +/* Per-rtgroup bitmap contents. */ + +/* Cross-reference rtbitmap entries with other metadata. 
*/ +STATIC void +xchk_rtbitmap_xref( + struct xchk_rtbitmap *rtb, + xfs_rtblock_t startblock, + xfs_rtblock_t blockcount) +{ + struct xfs_scrub *sc = rtb->sc; + xfs_rgblock_t rgbno = xfs_rtb_to_rgbno(sc->mp, startblock); + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + if (!sc->sr.rmap_cur) + return; + + xchk_xref_has_no_rt_owner(sc, rgbno, blockcount); + xchk_xref_is_not_rt_shared(sc, rgbno, blockcount); + xchk_xref_is_not_rt_cow_staging(sc, rgbno, blockcount); + + if (rtb->next_free_rgbno < rgbno) + xchk_xref_has_rt_owner(sc, rtb->next_free_rgbno, + rgbno - rtb->next_free_rgbno); + rtb->next_free_rgbno = rgbno + blockcount; +} /* Scrub a free extent record from the realtime bitmap. */ STATIC int @@ -83,7 +122,8 @@ xchk_rtbitmap_rec( const struct xfs_rtalloc_rec *rec, void *priv) { - struct xfs_scrub *sc = priv; + struct xchk_rtbitmap *rtb = priv; + struct xfs_scrub *sc = rtb->sc; xfs_rtblock_t startblock; xfs_filblks_t blockcount; @@ -92,6 +132,12 @@ xchk_rtbitmap_rec( if (!xfs_verify_rtbext(rtg_mount(rtg), startblock, blockcount)) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); + + xchk_rtbitmap_xref(rtb, startblock, blockcount); + + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return -ECANCELED; + return 0; } @@ -139,15 +185,16 @@ xchk_rtbitmap_check_extents( return error; } -/* Scrub the realtime bitmap. */ +/* Scrub this group's realtime bitmap. */ int xchk_rtbitmap( struct xfs_scrub *sc) { struct xfs_mount *mp = sc->mp; struct xfs_rtgroup *rtg = sc->sr.rtg; - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); struct xchk_rtbitmap *rtb = sc->buf; + xfs_rgblock_t last_rgbno; int error; /* Is sb_rextents correct? */ @@ -200,10 +247,20 @@ xchk_rtbitmap( if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) return error; - error = xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtbitmap_rec, sc); + rtb->next_free_rgbno = 0; + error = xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtbitmap_rec, rtb); if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) return error; + /* + * Check that the are rmappings for all rt extents between the end of + * the last free extent we saw and the last possible extent in the rt + * group. + */ + last_rgbno = rtg->rtg_extents * mp->m_sb.sb_rextsize - 1; + if (rtb->next_free_rgbno < last_rgbno) + xchk_xref_has_rt_owner(sc, rtb->next_free_rgbno, + last_rgbno - rtb->next_free_rgbno); return 0; } @@ -215,7 +272,7 @@ xchk_xref_is_used_rt_space( xfs_extlen_t len) { struct xfs_rtgroup *rtg = sc->sr.rtg; - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); xfs_rtxnum_t startext; xfs_rtxnum_t endext; bool is_free; diff --git a/fs/xfs/scrub/rtbitmap.h b/fs/xfs/scrub/rtbitmap.h index 85304ff019e1..fe52b877253d 100644 --- a/fs/xfs/scrub/rtbitmap.h +++ b/fs/xfs/scrub/rtbitmap.h @@ -6,17 +6,72 @@ #ifndef __XFS_SCRUB_RTBITMAP_H__ #define __XFS_SCRUB_RTBITMAP_H__ +/* + * We use an xfile to construct new bitmap blocks for the portion of the + * rtbitmap file that we're replacing. Whereas the ondisk bitmap must be + * accessed through the buffer cache, the xfile bitmap supports direct + * word-level accesses. Therefore, we create a small abstraction for linear + * access. + */ +typedef unsigned long long xrep_wordoff_t; +typedef unsigned int xrep_wordcnt_t; + +/* Mask to round an rtx down to the nearest bitmap word. 
*/ +#define XREP_RTBMP_WORDMASK ((1ULL << XFS_NBWORDLOG) - 1) + + struct xchk_rtbitmap { + struct xfs_scrub *sc; + uint64_t rextents; uint64_t rbmblocks; unsigned int rextslog; unsigned int resblks; + + /* The next free rt group block number that we expect to see. */ + xfs_rgblock_t next_free_rgbno; + +#ifdef CONFIG_XFS_ONLINE_REPAIR + /* stuff for staging a new bitmap */ + struct xfs_rtalloc_args args; + struct xrep_tempexch tempexch; +#endif + + /* The next rtgroup block we expect to see during our rtrmapbt walk. */ + xfs_rgblock_t next_rgbno; + + /* rtgroup lock flags */ + unsigned int rtglock_flags; + + /* rtword position of xfile as we write buffers to disk. */ + xrep_wordoff_t prep_wordoff; + + /* In-Memory rtbitmap for repair. */ + union xfs_rtword_raw words[]; }; #ifdef CONFIG_XFS_ONLINE_REPAIR int xrep_setup_rtbitmap(struct xfs_scrub *sc, struct xchk_rtbitmap *rtb); + +/* + * How big should the words[] buffer be? + * + * For repairs, we want a full fsblock worth of space so that we can memcpy a + * buffer full of 1s into the xfile bitmap. The xfile bitmap doesn't have + * rtbitmap block headers, so we don't use blockwsize. Scrub doesn't use the + * words buffer at all. + */ +static inline unsigned int +xchk_rtbitmap_wordcnt( + struct xfs_scrub *sc) +{ + if (xchk_could_repair(sc)) + return sc->mp->m_sb.sb_blocksize >> XFS_WORDLOG; + return 0; +} #else # define xrep_setup_rtbitmap(sc, rtb) (0) +# define xchk_rtbitmap_wordcnt(sc) (0) #endif /* CONFIG_XFS_ONLINE_REPAIR */ #endif /* __XFS_SCRUB_RTBITMAP_H__ */ diff --git a/fs/xfs/scrub/rtbitmap_repair.c b/fs/xfs/scrub/rtbitmap_repair.c index 0fef98e9f834..203a1a97c502 100644 --- a/fs/xfs/scrub/rtbitmap_repair.c +++ b/fs/xfs/scrub/rtbitmap_repair.c @@ -12,32 +12,66 @@ #include "xfs_btree.h" #include "xfs_log_format.h" #include "xfs_trans.h" +#include "xfs_rtalloc.h" #include "xfs_inode.h" #include "xfs_bit.h" #include "xfs_bmap.h" #include "xfs_bmap_btree.h" +#include "xfs_rmap.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_exchmaps.h" +#include "xfs_rtbitmap.h" +#include "xfs_rtgroup.h" +#include "xfs_extent_busy.h" +#include "xfs_refcount.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/trace.h" #include "scrub/repair.h" #include "scrub/xfile.h" +#include "scrub/tempfile.h" +#include "scrub/tempexch.h" +#include "scrub/reap.h" #include "scrub/rtbitmap.h" -/* Set up to repair the realtime bitmap file metadata. */ +/* rt bitmap content repairs */ + +/* Set up to repair the realtime bitmap for this group. */ int xrep_setup_rtbitmap( struct xfs_scrub *sc, struct xchk_rtbitmap *rtb) { struct xfs_mount *mp = sc->mp; - unsigned long long blocks = 0; + char *descr; + unsigned long long blocks = mp->m_sb.sb_rbmblocks; + int error; + + error = xrep_tempfile_create(sc, S_IFREG); + if (error) + return error; + + /* Create an xfile to hold our reconstructed bitmap. */ + descr = xchk_xfile_rtgroup_descr(sc, "bitmap file"); + error = xfile_create(descr, blocks * mp->m_sb.sb_blocksize, &sc->xfile); + kfree(descr); + if (error) + return error; /* - * Reserve enough blocks to write out a completely new bmbt for a - * maximally fragmented bitmap file. We do not hold the rtbitmap - * ILOCK yet, so this is entirely speculative. + * Reserve enough blocks to write out a completely new bitmap file, + * plus twice as many blocks as we would need if we can only allocate + * one block per data fork mapping. This should cover the + * preallocation of the temporary file and exchanging the extent + * mappings. 
+ * + * We cannot use xfs_exchmaps_estimate because we have not yet + * constructed the replacement bitmap and therefore do not know how + * many extents it will use. By the time we do, we will have a dirty + * transaction (which we cannot drop because we cannot drop the + * rtbitmap ILOCK) and cannot ask for more reservation. */ - blocks = xfs_bmbt_calc_size(mp, mp->m_sb.sb_rbmblocks); + blocks += xfs_bmbt_calc_size(mp, blocks) * 2; if (blocks > UINT_MAX) return -EOPNOTSUPP; @@ -45,6 +79,325 @@ xrep_setup_rtbitmap( return 0; } +static inline xrep_wordoff_t +rtx_to_wordoff( + struct xfs_mount *mp, + xfs_rtxnum_t rtx) +{ + return rtx >> XFS_NBWORDLOG; +} + +static inline xrep_wordcnt_t +rtxlen_to_wordcnt( + xfs_rtxlen_t rtxlen) +{ + return rtxlen >> XFS_NBWORDLOG; +} + +/* Helper functions to record rtwords in an xfile. */ + +static inline int +xfbmp_load( + struct xchk_rtbitmap *rtb, + xrep_wordoff_t wordoff, + xfs_rtword_t *word) +{ + union xfs_rtword_raw urk; + int error; + + ASSERT(xfs_has_rtgroups(rtb->sc->mp)); + + error = xfile_load(rtb->sc->xfile, &urk, + sizeof(union xfs_rtword_raw), + wordoff << XFS_WORDLOG); + if (error) + return error; + + *word = be32_to_cpu(urk.rtg); + return 0; +} + +static inline int +xfbmp_store( + struct xchk_rtbitmap *rtb, + xrep_wordoff_t wordoff, + const xfs_rtword_t word) +{ + union xfs_rtword_raw urk; + + ASSERT(xfs_has_rtgroups(rtb->sc->mp)); + + urk.rtg = cpu_to_be32(word); + return xfile_store(rtb->sc->xfile, &urk, + sizeof(union xfs_rtword_raw), + wordoff << XFS_WORDLOG); +} + +static inline int +xfbmp_copyin( + struct xchk_rtbitmap *rtb, + xrep_wordoff_t wordoff, + const union xfs_rtword_raw *word, + xrep_wordcnt_t nr_words) +{ + return xfile_store(rtb->sc->xfile, word, nr_words << XFS_WORDLOG, + wordoff << XFS_WORDLOG); +} + +static inline int +xfbmp_copyout( + struct xchk_rtbitmap *rtb, + xrep_wordoff_t wordoff, + union xfs_rtword_raw *word, + xrep_wordcnt_t nr_words) +{ + return xfile_load(rtb->sc->xfile, word, nr_words << XFS_WORDLOG, + wordoff << XFS_WORDLOG); +} + +/* Perform a logical OR operation on an rtword in the incore bitmap. */ +static int +xrep_rtbitmap_or( + struct xchk_rtbitmap *rtb, + xrep_wordoff_t wordoff, + xfs_rtword_t mask) +{ + xfs_rtword_t word; + int error; + + error = xfbmp_load(rtb, wordoff, &word); + if (error) + return error; + + trace_xrep_rtbitmap_or(rtb->sc->mp, wordoff, mask, word); + + return xfbmp_store(rtb, wordoff, word | mask); +} + +/* + * Mark as free every rt extent between the next rt block we expected to see + * in the rtrmap records and the given rt block. + */ +STATIC int +xrep_rtbitmap_mark_free( + struct xchk_rtbitmap *rtb, + xfs_rgblock_t rgbno) +{ + struct xfs_mount *mp = rtb->sc->mp; + struct xchk_rt *sr = &rtb->sc->sr; + struct xfs_rtgroup *rtg = sr->rtg; + xfs_rtxnum_t startrtx; + xfs_rtxnum_t nextrtx; + xrep_wordoff_t wordoff, nextwordoff; + unsigned int bit; + unsigned int bufwsize; + xfs_extlen_t mod; + xfs_rtword_t mask; + enum xbtree_recpacking outcome; + int error; + + if (!xfs_verify_rgbext(rtg, rtb->next_rgbno, rgbno - rtb->next_rgbno)) + return -EFSCORRUPTED; + + /* + * Convert rt blocks to rt extents The block range we find must be + * aligned to an rtextent boundary on both ends. 
+ */ + startrtx = xfs_rgbno_to_rtx(mp, rtb->next_rgbno); + mod = xfs_rgbno_to_rtxoff(mp, rtb->next_rgbno); + if (mod) + return -EFSCORRUPTED; + + nextrtx = xfs_rgbno_to_rtx(mp, rgbno - 1) + 1; + mod = xfs_rgbno_to_rtxoff(mp, rgbno - 1); + if (mod != mp->m_sb.sb_rextsize - 1) + return -EFSCORRUPTED; + + /* Must not be shared or CoW staging. */ + if (sr->refc_cur) { + error = xfs_refcount_has_records(sr->refc_cur, + XFS_REFC_DOMAIN_SHARED, rtb->next_rgbno, + rgbno - rtb->next_rgbno, &outcome); + if (error) + return error; + if (outcome != XBTREE_RECPACKING_EMPTY) + return -EFSCORRUPTED; + + error = xfs_refcount_has_records(sr->refc_cur, + XFS_REFC_DOMAIN_COW, rtb->next_rgbno, + rgbno - rtb->next_rgbno, &outcome); + if (error) + return error; + if (outcome != XBTREE_RECPACKING_EMPTY) + return -EFSCORRUPTED; + } + + trace_xrep_rtbitmap_record_free(mp, startrtx, nextrtx - 1); + + /* Set bits as needed to round startrtx up to the nearest word. */ + bit = startrtx & XREP_RTBMP_WORDMASK; + if (bit) { + xfs_rtblock_t len = nextrtx - startrtx; + unsigned int lastbit; + + lastbit = min(bit + len, XFS_NBWORD); + mask = (((xfs_rtword_t)1 << (lastbit - bit)) - 1) << bit; + + error = xrep_rtbitmap_or(rtb, rtx_to_wordoff(mp, startrtx), + mask); + if (error || lastbit - bit == len) + return error; + startrtx += XFS_NBWORD - bit; + } + + /* Set bits as needed to round nextrtx down to the nearest word. */ + bit = nextrtx & XREP_RTBMP_WORDMASK; + if (bit) { + mask = ((xfs_rtword_t)1 << bit) - 1; + + error = xrep_rtbitmap_or(rtb, rtx_to_wordoff(mp, nextrtx), + mask); + if (error || startrtx + bit == nextrtx) + return error; + nextrtx -= bit; + } + + trace_xrep_rtbitmap_record_free_bulk(mp, startrtx, nextrtx - 1); + + /* Set all the words in between, up to a whole fs block at once. */ + wordoff = rtx_to_wordoff(mp, startrtx); + nextwordoff = rtx_to_wordoff(mp, nextrtx); + bufwsize = mp->m_sb.sb_blocksize >> XFS_WORDLOG; + + while (wordoff < nextwordoff) { + xrep_wordoff_t rem; + xrep_wordcnt_t wordcnt; + + wordcnt = min_t(xrep_wordcnt_t, nextwordoff - wordoff, + bufwsize); + + /* + * Try to keep us aligned to the rtwords buffer to reduce the + * number of xfile writes. + */ + rem = wordoff & (bufwsize - 1); + if (rem) + wordcnt = min_t(xrep_wordcnt_t, wordcnt, + bufwsize - rem); + + error = xfbmp_copyin(rtb, wordoff, rtb->words, wordcnt); + if (error) + return error; + + wordoff += wordcnt; + } + + return 0; +} + +/* Set free space in the rtbitmap based on rtrmapbt records. */ +STATIC int +xrep_rtbitmap_walk_rtrmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xchk_rtbitmap *rtb = priv; + int error = 0; + + if (xchk_should_terminate(rtb->sc, &error)) + return error; + + if (rtb->next_rgbno < rec->rm_startblock) { + error = xrep_rtbitmap_mark_free(rtb, rec->rm_startblock); + if (error) + return error; + } + + rtb->next_rgbno = max(rtb->next_rgbno, + rec->rm_startblock + rec->rm_blockcount); + return 0; +} + +/* + * Walk the rtrmapbt to find all the gaps between records, and mark the gaps + * in the realtime bitmap that we're computing. + */ +STATIC int +xrep_rtbitmap_find_freespace( + struct xchk_rtbitmap *rtb) +{ + struct xfs_scrub *sc = rtb->sc; + struct xfs_mount *mp = sc->mp; + struct xfs_rtgroup *rtg = sc->sr.rtg; + uint64_t blockcount; + int error; + + /* Prepare a buffer of ones so that we can accelerate bulk setting. 
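The reconstruction strategy here is that anything the realtime rmap btree does not cover must be free space: xrep_rtbitmap_walk_rtrmap() walks the rmap records in ascending order and xrep_rtbitmap_mark_free() sets the bitmap bits for each gap. A minimal standalone sketch of that gap walk (plain C with made-up types, ignoring the word masking and rt extent alignment handled above):

#include <stdint.h>
#include <stdio.h>

/* Simplified reverse-mapping record: a used extent in an rt group. */
struct rmap { uint32_t start; uint32_t len; };

static void mark_free(uint32_t start, uint32_t end)
{
	printf("free: blocks %u-%u\n", (unsigned)start, (unsigned)(end - 1));
}

/* Walk records in order; every gap between them is free space. */
int main(void)
{
	const struct rmap used[] = { { 4, 2 }, { 10, 3 }, { 11, 4 } };
	const uint32_t group_blocks = 20;
	uint32_t next_free = 0;

	for (unsigned i = 0; i < sizeof(used) / sizeof(used[0]); i++) {
		if (next_free < used[i].start)
			mark_free(next_free, used[i].start);
		if (used[i].start + used[i].len > next_free)
			next_free = used[i].start + used[i].len;
	}
	if (next_free < group_blocks)
		mark_free(next_free, group_blocks);
	return 0;
}

The kernel version additionally rounds each gap to rt extent boundaries and batches whole words of ones into the xfile, which is what the word/mask arithmetic above implements.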
*/ + memset(rtb->words, 0xFF, mp->m_sb.sb_blocksize); + + xrep_rtgroup_btcur_init(sc, &sc->sr); + error = xfs_rmap_query_all(sc->sr.rmap_cur, xrep_rtbitmap_walk_rtrmap, + rtb); + if (error) + goto out; + + /* + * Mark as free every possible rt extent from the last one we saw to + * the end of the rt group. + */ + blockcount = rtg->rtg_extents * mp->m_sb.sb_rextsize; + if (rtb->next_rgbno < blockcount) { + error = xrep_rtbitmap_mark_free(rtb, blockcount); + if (error) + goto out; + } + +out: + xchk_rtgroup_btcur_free(&sc->sr); + return error; +} + +static int +xrep_rtbitmap_prep_buf( + struct xfs_scrub *sc, + struct xfs_buf *bp, + void *data) +{ + struct xchk_rtbitmap *rtb = data; + struct xfs_mount *mp = sc->mp; + union xfs_rtword_raw *ondisk; + int error; + + rtb->args.mp = sc->mp; + rtb->args.tp = sc->tp; + rtb->args.rbmbp = bp; + ondisk = xfs_rbmblock_wordptr(&rtb->args, 0); + rtb->args.rbmbp = NULL; + + error = xfbmp_copyout(rtb, rtb->prep_wordoff, ondisk, + mp->m_blockwsize); + if (error) + return error; + + if (xfs_has_rtgroups(sc->mp)) { + struct xfs_rtbuf_blkinfo *hdr = bp->b_addr; + + hdr->rt_magic = cpu_to_be32(XFS_RTBITMAP_MAGIC); + hdr->rt_owner = cpu_to_be64(sc->ip->i_ino); + hdr->rt_blkno = cpu_to_be64(xfs_buf_daddr(bp)); + hdr->rt_lsn = 0; + uuid_copy(&hdr->rt_uuid, &sc->mp->m_sb.sb_meta_uuid); + bp->b_ops = &xfs_rtbitmap_buf_ops; + } else { + bp->b_ops = &xfs_rtbuf_ops; + } + + rtb->prep_wordoff += mp->m_blockwsize; + xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_RTBITMAP_BUF); + return 0; +} + /* * Make sure that the given range of the data fork of the realtime file is * mapped to written blocks. The caller must ensure that the inode is joined @@ -160,9 +513,18 @@ xrep_rtbitmap( { struct xchk_rtbitmap *rtb = sc->buf; struct xfs_mount *mp = sc->mp; + struct xfs_group *xg = rtg_group(sc->sr.rtg); unsigned long long blocks = 0; + unsigned int busy_gen; int error; + /* We require the realtime rmapbt to rebuild anything. */ + if (!xfs_has_rtrmapbt(sc->mp)) + return -EOPNOTSUPP; + /* We require atomic file exchange range to rebuild anything. */ + if (!xfs_has_exchange_range(sc->mp)) + return -EOPNOTSUPP; + /* Impossibly large rtbitmap means we can't touch the filesystem. */ if (rtb->rbmblocks > U32_MAX) return 0; @@ -195,6 +557,79 @@ xrep_rtbitmap( if (error) return error; - /* Fix inconsistent bitmap geometry */ - return xrep_rtbitmap_geometry(sc, rtb); + /* + * Fix inconsistent bitmap geometry. This function returns with a + * clean scrub transaction. + */ + error = xrep_rtbitmap_geometry(sc, rtb); + if (error) + return error; + + /* + * Make sure the busy extent list is clear because we can't put extents + * on there twice. + */ + if (!xfs_extent_busy_list_empty(xg, &busy_gen)) { + error = xfs_extent_busy_flush(sc->tp, xg, busy_gen, 0); + if (error) + return error; + } + + /* + * Generate the new rtbitmap data. We don't need the rtbmp information + * once this call is finished. + */ + error = xrep_rtbitmap_find_freespace(rtb); + if (error) + return error; + + /* + * Try to take ILOCK_EXCL of the temporary file. We had better be the + * only ones holding onto this inode, but we can't block while holding + * the rtbitmap file's ILOCK_EXCL. + */ + while (!xrep_tempfile_ilock_nowait(sc)) { + if (xchk_should_terminate(sc, &error)) + return error; + delay(1); + } + + /* + * Make sure we have space allocated for the part of the bitmap + * file that corresponds to this group. We already joined sc->ip. 
+ */ + xfs_trans_ijoin(sc->tp, sc->tempip, 0); + error = xrep_tempfile_prealloc(sc, 0, rtb->rbmblocks); + if (error) + return error; + + /* Last chance to abort before we start committing fixes. */ + if (xchk_should_terminate(sc, &error)) + return error; + + /* Copy the bitmap file that we generated. */ + error = xrep_tempfile_copyin(sc, 0, rtb->rbmblocks, + xrep_rtbitmap_prep_buf, rtb); + if (error) + return error; + error = xrep_tempfile_set_isize(sc, + XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks)); + if (error) + return error; + + /* + * Now exchange the data fork contents. We're done with the temporary + * buffer, so we can reuse it for the tempfile exchmaps information. + */ + error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, 0, + rtb->rbmblocks, &rtb->tempexch); + if (error) + return error; + + error = xrep_tempexch_contents(sc, &rtb->tempexch); + if (error) + return error; + + /* Free the old rtbitmap blocks if they're not in use. */ + return xrep_reap_ifork(sc, sc->tempip, XFS_DATA_FORK); } diff --git a/fs/xfs/scrub/rtrefcount.c b/fs/xfs/scrub/rtrefcount.c new file mode 100644 index 000000000000..4c5dffc73641 --- /dev/null +++ b/fs/xfs/scrub/rtrefcount.c @@ -0,0 +1,661 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_trans.h" +#include "xfs_btree.h" +#include "xfs_rmap.h" +#include "xfs_refcount.h" +#include "xfs_inode.h" +#include "xfs_rtbitmap.h" +#include "xfs_rtgroup.h" +#include "xfs_metafile.h" +#include "xfs_rtrefcount_btree.h" +#include "xfs_rtalloc.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/repair.h" + +/* Set us up with the realtime refcount metadata locked. */ +int +xchk_setup_rtrefcountbt( + struct xfs_scrub *sc) +{ + int error; + + if (xchk_need_intent_drain(sc)) + xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN); + + if (xchk_could_repair(sc)) { + error = xrep_setup_rtrefcountbt(sc); + if (error) + return error; + } + + error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr); + if (error) + return error; + + error = xchk_setup_rt(sc); + if (error) + return error; + + error = xchk_install_live_inode(sc, rtg_refcount(sc->sr.rtg)); + if (error) + return error; + + return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL); +} + +/* Realtime Reference count btree scrubber. */ + +/* + * Confirming Reference Counts via Reverse Mappings + * + * We want to count the reverse mappings overlapping a refcount record + * (bno, len, refcount), allowing for the possibility that some of the + * overlap may come from smaller adjoining reverse mappings, while some + * comes from single extents which overlap the range entirely. The + * outer loop is as follows: + * + * 1. For all reverse mappings overlapping the refcount extent, + * a. If a given rmap completely overlaps, mark it as seen. + * b. Otherwise, record the fragment (in agbno order) for later + * processing. + * + * Once we've seen all the rmaps, we know that for all blocks in the + * refcount record we want to find $refcount owners and we've already + * visited $seen extents that overlap all the blocks. Therefore, we + * need to find ($refcount - $seen) owners for every block in the + * extent; call that quantity $target_nr. Proceed as follows: + * + * 2. 
Pull the first $target_nr fragments from the list; all of them + * should start at or before the start of the extent. + * Call this subset of fragments the working set. + * 3. Until there are no more unprocessed fragments, + * a. Find the shortest fragments in the set and remove them. + * b. Note the block number of the end of these fragments. + * c. Pull the same number of fragments from the list. All of these + * fragments should start at the block number recorded in the + * previous step. + * d. Put those fragments in the set. + * 4. Check that there are $target_nr fragments remaining in the list, + * and that they all end at or beyond the end of the refcount extent. + * + * If the refcount is correct, all the check conditions in the algorithm + * should always hold true. If not, the refcount is incorrect. + */ +struct xchk_rtrefcnt_frag { + struct list_head list; + struct xfs_rmap_irec rm; +}; + +struct xchk_rtrefcnt_check { + struct xfs_scrub *sc; + struct list_head fragments; + + /* refcount extent we're examining */ + xfs_rgblock_t bno; + xfs_extlen_t len; + xfs_nlink_t refcount; + + /* number of owners seen */ + xfs_nlink_t seen; +}; + +/* + * Decide if the given rmap is large enough that we can redeem it + * towards refcount verification now, or if it's a fragment, in + * which case we'll hang onto it in the hopes that we'll later + * discover that we've collected exactly the correct number of + * fragments as the rtrefcountbt says we should have. + */ +STATIC int +xchk_rtrefcountbt_rmap_check( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xchk_rtrefcnt_check *refchk = priv; + struct xchk_rtrefcnt_frag *frag; + xfs_rgblock_t rm_last; + xfs_rgblock_t rc_last; + int error = 0; + + if (xchk_should_terminate(refchk->sc, &error)) + return error; + + rm_last = rec->rm_startblock + rec->rm_blockcount - 1; + rc_last = refchk->bno + refchk->len - 1; + + /* Confirm that a single-owner refc extent is a CoW stage. */ + if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) { + xchk_btree_xref_set_corrupt(refchk->sc, cur, 0); + return 0; + } + + if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) { + /* + * The rmap overlaps the refcount record, so we can confirm + * one refcount owner seen. + */ + refchk->seen++; + } else { + /* + * This rmap covers only part of the refcount record, so + * save the fragment for later processing. If the rmapbt + * is healthy each rmap_irec we see will be in agbno order + * so we don't need insertion sort here. + */ + frag = kmalloc(sizeof(struct xchk_rtrefcnt_frag), + XCHK_GFP_FLAGS); + if (!frag) + return -ENOMEM; + memcpy(&frag->rm, rec, sizeof(frag->rm)); + list_add_tail(&frag->list, &refchk->fragments); + } + + return 0; +} + +/* + * Given a bunch of rmap fragments, iterate through them, keeping + * a running tally of the refcount. If this ever deviates from + * what we expect (which is the rtrefcountbt's refcount minus the + * number of extents that totally covered the rtrefcountbt extent), + * we have a rtrefcountbt error. 
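The fragment bookkeeping above exists to avoid visiting every block; conceptually the cross-reference is just "each block of a refcount record must be covered by exactly rc_refcount reverse mappings". A brute-force sketch of that invariant (standalone C, hypothetical types, no kernel APIs):

#include <stdint.h>
#include <stdio.h>

struct rmap { uint32_t start; uint32_t len; };

/*
 * Naive O(len * nr_rmaps) check of a refcount record against the reverse
 * mappings: every block in the record must be owned by exactly
 * rc_refcount rmaps.
 */
static int refcount_matches(const struct rmap *rmaps, unsigned nr,
			    uint32_t rc_start, uint32_t rc_len,
			    uint32_t rc_refcount)
{
	for (uint32_t b = rc_start; b < rc_start + rc_len; b++) {
		uint32_t owners = 0;

		for (unsigned i = 0; i < nr; i++)
			if (b >= rmaps[i].start &&
			    b < rmaps[i].start + rmaps[i].len)
				owners++;
		if (owners != rc_refcount)
			return 0;
	}
	return 1;
}

int main(void)
{
	const struct rmap rmaps[] = { { 10, 6 }, { 10, 3 }, { 13, 3 } };

	/* Blocks 10-15 are owned twice, so (10, 6, 2) should verify. */
	printf("%s\n", refcount_matches(rmaps, 3, 10, 6, 2) ? "ok" : "corrupt");
	return 0;
}

The fragment algorithm reaches the same verdict by tracking only the points where coverage can change, so it stays proportional to the number of rmaps rather than to the extent length.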
+ */ +STATIC void +xchk_rtrefcountbt_process_rmap_fragments( + struct xchk_rtrefcnt_check *refchk) +{ + struct list_head worklist; + struct xchk_rtrefcnt_frag *frag; + struct xchk_rtrefcnt_frag *n; + xfs_rgblock_t bno; + xfs_rgblock_t rbno; + xfs_rgblock_t next_rbno; + xfs_nlink_t nr; + xfs_nlink_t target_nr; + + target_nr = refchk->refcount - refchk->seen; + if (target_nr == 0) + return; + + /* + * There are (refchk->rc.rc_refcount - refchk->nr refcount) + * references we haven't found yet. Pull that many off the + * fragment list and figure out where the smallest rmap ends + * (and therefore the next rmap should start). All the rmaps + * we pull off should start at or before the beginning of the + * refcount record's range. + */ + INIT_LIST_HEAD(&worklist); + rbno = NULLRGBLOCK; + + /* Make sure the fragments actually /are/ in bno order. */ + bno = 0; + list_for_each_entry(frag, &refchk->fragments, list) { + if (frag->rm.rm_startblock < bno) + goto done; + bno = frag->rm.rm_startblock; + } + + /* + * Find all the rmaps that start at or before the refc extent, + * and put them on the worklist. + */ + nr = 0; + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + if (frag->rm.rm_startblock > refchk->bno || nr > target_nr) + break; + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (bno < rbno) + rbno = bno; + list_move_tail(&frag->list, &worklist); + nr++; + } + + /* + * We should have found exactly $target_nr rmap fragments starting + * at or before the refcount extent. + */ + if (nr != target_nr) + goto done; + + while (!list_empty(&refchk->fragments)) { + /* Discard any fragments ending at rbno from the worklist. */ + nr = 0; + next_rbno = NULLRGBLOCK; + list_for_each_entry_safe(frag, n, &worklist, list) { + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (bno != rbno) { + if (bno < next_rbno) + next_rbno = bno; + continue; + } + list_del(&frag->list); + kfree(frag); + nr++; + } + + /* Try to add nr rmaps starting at rbno to the worklist. */ + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + bno = frag->rm.rm_startblock + frag->rm.rm_blockcount; + if (frag->rm.rm_startblock != rbno) + goto done; + list_move_tail(&frag->list, &worklist); + if (next_rbno > bno) + next_rbno = bno; + nr--; + if (nr == 0) + break; + } + + /* + * If we get here and nr > 0, this means that we added fewer + * items to the worklist than we discarded because the fragment + * list ran out of items. Therefore, we cannot maintain the + * required refcount. Something is wrong, so we're done. + */ + if (nr) + goto done; + + rbno = next_rbno; + } + + /* + * Make sure the last extent we processed ends at or beyond + * the end of the refcount extent. + */ + if (rbno < refchk->bno + refchk->len) + goto done; + + /* Actually record us having seen the remaining refcount. */ + refchk->seen = refchk->refcount; +done: + /* Delete fragments and work list. */ + list_for_each_entry_safe(frag, n, &worklist, list) { + list_del(&frag->list); + kfree(frag); + } + list_for_each_entry_safe(frag, n, &refchk->fragments, list) { + list_del(&frag->list); + kfree(frag); + } +} + +/* Use the rmap entries covering this extent to verify the refcount. 
*/ +STATIC void +xchk_rtrefcountbt_xref_rmap( + struct xfs_scrub *sc, + const struct xfs_refcount_irec *irec) +{ + struct xchk_rtrefcnt_check refchk = { + .sc = sc, + .bno = irec->rc_startblock, + .len = irec->rc_blockcount, + .refcount = irec->rc_refcount, + .seen = 0, + }; + struct xfs_rmap_irec low; + struct xfs_rmap_irec high; + struct xchk_rtrefcnt_frag *frag; + struct xchk_rtrefcnt_frag *n; + int error; + + if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + /* Cross-reference with the rmapbt to confirm the refcount. */ + memset(&low, 0, sizeof(low)); + low.rm_startblock = irec->rc_startblock; + memset(&high, 0xFF, sizeof(high)); + high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1; + + INIT_LIST_HEAD(&refchk.fragments); + error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high, + xchk_rtrefcountbt_rmap_check, &refchk); + if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur)) + goto out_free; + + xchk_rtrefcountbt_process_rmap_fragments(&refchk); + if (irec->rc_refcount != refchk.seen) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); + +out_free: + list_for_each_entry_safe(frag, n, &refchk.fragments, list) { + list_del(&frag->list); + kfree(frag); + } +} + +/* Cross-reference with the other btrees. */ +STATIC void +xchk_rtrefcountbt_xref( + struct xfs_scrub *sc, + const struct xfs_refcount_irec *irec) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_rt_space(sc, + xfs_rgbno_to_rtb(sc->sr.rtg, irec->rc_startblock), + irec->rc_blockcount); + xchk_rtrefcountbt_xref_rmap(sc, irec); +} + +struct xchk_rtrefcbt_records { + /* Previous refcount record. */ + struct xfs_refcount_irec prev_rec; + + /* The next rtgroup block where we aren't expecting shared extents. */ + xfs_rgblock_t next_unshared_rgbno; + + /* Number of CoW blocks we expect. */ + xfs_extlen_t cow_blocks; + + /* Was the last record a shared or CoW staging extent? */ + enum xfs_refc_domain prev_domain; +}; + +static inline bool +xchk_rtrefcount_mergeable( + struct xchk_rtrefcbt_records *rrc, + const struct xfs_refcount_irec *r2) +{ + const struct xfs_refcount_irec *r1 = &rrc->prev_rec; + + /* Ignore if prev_rec is not yet initialized. */ + if (r1->rc_blockcount > 0) + return false; + + if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock) + return false; + if (r1->rc_refcount != r2->rc_refcount) + return false; + if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount > + XFS_REFC_LEN_MAX) + return false; + + return true; +} + +/* Flag failures for records that could be merged. */ +STATIC void +xchk_rtrefcountbt_check_mergeable( + struct xchk_btree *bs, + struct xchk_rtrefcbt_records *rrc, + const struct xfs_refcount_irec *irec) +{ + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + if (xchk_rtrefcount_mergeable(rrc, irec)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec)); +} + +STATIC int +xchk_rtrefcountbt_rmap_check_gap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + xfs_rgblock_t *next_bno = priv; + + if (*next_bno != NULLRGBLOCK && rec->rm_startblock < *next_bno) + return -ECANCELED; + + *next_bno = rec->rm_startblock + rec->rm_blockcount; + return 0; +} + +/* + * Make sure that a gap in the reference count records does not correspond to + * overlapping records (i.e. shared extents) in the reverse mappings. 
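The mergeable-record check catches adjacent records that should have been combined: if the previous record ends exactly where the current one starts and both carry the same reference count, the btree writer should have emitted a single record, so finding such a pair is flagged as corruption. Stripped of the domain and length-overflow handling, the test amounts to this (standalone sketch, hypothetical names):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct refc { uint32_t start; uint32_t len; uint32_t refcount; };

/*
 * Adjacent records with the same refcount should have been written as one
 * record; seeing two of them means the on-disk btree is inconsistent with
 * how the record-merging code would have written it.
 */
static bool mergeable(const struct refc *a, const struct refc *b)
{
	return a->len > 0 &&
	       a->start + a->len == b->start &&
	       a->refcount == b->refcount;
}

int main(void)
{
	const struct refc prev = { 10, 4, 2 }, cur = { 14, 6, 2 };

	printf("mergeable: %d\n", mergeable(&prev, &cur));	/* prints 1 */
	return 0;
}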
+ */ +static inline void +xchk_rtrefcountbt_xref_gaps( + struct xfs_scrub *sc, + struct xchk_rtrefcbt_records *rrc, + xfs_rtblock_t bno) +{ + struct xfs_rmap_irec low; + struct xfs_rmap_irec high; + xfs_rgblock_t next_bno = NULLRGBLOCK; + int error; + + if (bno <= rrc->next_unshared_rgbno || !sc->sr.rmap_cur || + xchk_skip_xref(sc->sm)) + return; + + memset(&low, 0, sizeof(low)); + low.rm_startblock = rrc->next_unshared_rgbno; + memset(&high, 0xFF, sizeof(high)); + high.rm_startblock = bno - 1; + + error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high, + xchk_rtrefcountbt_rmap_check_gap, &next_bno); + if (error == -ECANCELED) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); + else + xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur); +} + +/* Scrub a rtrefcountbt record. */ +STATIC int +xchk_rtrefcountbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xfs_mount *mp = bs->cur->bc_mp; + struct xchk_rtrefcbt_records *rrc = bs->private; + struct xfs_refcount_irec irec; + u32 mod; + + xfs_refcount_btrec_to_irec(rec, &irec); + if (xfs_rtrefcount_check_irec(to_rtg(bs->cur->bc_group), &irec) != + NULL) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return 0; + } + + /* We can only share full rt extents. */ + mod = xfs_rgbno_to_rtxoff(mp, irec.rc_startblock); + if (mod) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + mod = xfs_extlen_to_rtxmod(mp, irec.rc_blockcount); + if (mod) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + if (irec.rc_domain == XFS_REFC_DOMAIN_COW) + rrc->cow_blocks += irec.rc_blockcount; + + /* Shared records always come before CoW records. */ + if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED && + rrc->prev_domain == XFS_REFC_DOMAIN_COW) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + rrc->prev_domain = irec.rc_domain; + + xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec); + xchk_rtrefcountbt_xref(bs->sc, &irec); + + /* + * If this is a record for a shared extent, check that all blocks + * between the previous record and this one have at most one reverse + * mapping. + */ + if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) { + xchk_rtrefcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock); + rrc->next_unshared_rgbno = irec.rc_startblock + + irec.rc_blockcount; + } + + return 0; +} + +/* Make sure we have as many refc blocks as the rmap says. */ +STATIC void +xchk_refcount_xref_rmap( + struct xfs_scrub *sc, + const struct xfs_owner_info *btree_oinfo, + xfs_extlen_t cow_blocks) +{ + xfs_filblks_t refcbt_blocks = 0; + xfs_filblks_t blocks; + int error; + + if (!sc->sr.rmap_cur || !sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + /* Check that we saw as many refcbt blocks as the rmap knows about. */ + error = xfs_btree_count_blocks(sc->sr.refc_cur, &refcbt_blocks); + if (!xchk_btree_process_error(sc, sc->sr.refc_cur, 0, &error)) + return; + error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, btree_oinfo, + &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) + return; + if (blocks != refcbt_blocks) + xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0); + + /* Check that we saw as many cow blocks as the rmap knows about. */ + error = xchk_count_rmap_ownedby_ag(sc, sc->sr.rmap_cur, + &XFS_RMAP_OINFO_COW, &blocks); + if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur)) + return; + if (blocks != cow_blocks) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); +} + +/* Scrub the refcount btree for some AG. 
*/ +int +xchk_rtrefcountbt( + struct xfs_scrub *sc) +{ + struct xfs_owner_info btree_oinfo; + struct xchk_rtrefcbt_records rrc = { + .cow_blocks = 0, + .next_unshared_rgbno = 0, + .prev_domain = XFS_REFC_DOMAIN_SHARED, + }; + int error; + + error = xchk_metadata_inode_forks(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + xfs_rmap_ino_bmbt_owner(&btree_oinfo, rtg_refcount(sc->sr.rtg)->i_ino, + XFS_DATA_FORK); + error = xchk_btree(sc, sc->sr.refc_cur, xchk_rtrefcountbt_rec, + &btree_oinfo, &rrc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + /* + * Check that all blocks between the last refcount > 1 record and the + * end of the rt volume have at most one reverse mapping. + */ + xchk_rtrefcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_rblocks); + + xchk_refcount_xref_rmap(sc, &btree_oinfo, rrc.cow_blocks); + + return 0; +} + +/* xref check that a cow staging extent is marked in the rtrefcountbt. */ +void +xchk_xref_is_rt_cow_staging( + struct xfs_scrub *sc, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + struct xfs_refcount_irec rc; + int has_refcount; + int error; + + if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm)) + return; + + /* Find the CoW staging extent. */ + error = xfs_refcount_lookup_le(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW, + bno, &has_refcount); + if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur)) + return; + if (!has_refcount) { + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); + return; + } + + error = xfs_refcount_get_rec(sc->sr.refc_cur, &rc, &has_refcount); + if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur)) + return; + if (!has_refcount) { + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); + return; + } + + /* CoW lookup returned a shared extent record? */ + if (rc.rc_domain != XFS_REFC_DOMAIN_COW) + xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); + + /* Must be at least as long as what was passed in */ + if (rc.rc_blockcount < len) + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); +} + +/* + * xref check that the extent is not shared. Only file data blocks + * can have multiple owners. + */ +void +xchk_xref_is_not_rt_shared( + struct xfs_scrub *sc, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + enum xbtree_recpacking outcome; + int error; + + if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_refcount_has_records(sc->sr.refc_cur, + XFS_REFC_DOMAIN_SHARED, bno, len, &outcome); + if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur)) + return; + if (outcome != XBTREE_RECPACKING_EMPTY) + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); +} + +/* xref check that the extent is not being used for CoW staging. */ +void +xchk_xref_is_not_rt_cow_staging( + struct xfs_scrub *sc, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + enum xbtree_recpacking outcome; + int error; + + if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_refcount_has_records(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW, + bno, len, &outcome); + if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur)) + return; + if (outcome != XBTREE_RECPACKING_EMPTY) + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); +} diff --git a/fs/xfs/scrub/rtrefcount_repair.c b/fs/xfs/scrub/rtrefcount_repair.c new file mode 100644 index 000000000000..257cfb24beb4 --- /dev/null +++ b/fs/xfs/scrub/rtrefcount_repair.c @@ -0,0 +1,783 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2021-2024 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_btree.h" +#include "xfs_btree_staging.h" +#include "xfs_bit.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_alloc.h" +#include "xfs_ialloc.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_refcount.h" +#include "xfs_rtrefcount_btree.h" +#include "xfs_error.h" +#include "xfs_health.h" +#include "xfs_inode.h" +#include "xfs_quota.h" +#include "xfs_rtalloc.h" +#include "xfs_ag.h" +#include "xfs_rtgroup.h" +#include "xfs_rtbitmap.h" +#include "scrub/xfs_scrub.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" +#include "scrub/fsb_bitmap.h" +#include "scrub/xfile.h" +#include "scrub/xfarray.h" +#include "scrub/newbt.h" +#include "scrub/reap.h" +#include "scrub/rcbag.h" + +/* + * Rebuilding the Reference Count Btree + * ==================================== + * + * This algorithm is "borrowed" from xfs_repair. Imagine the rmap + * entries as rectangles representing extents of physical blocks, and + * that the rectangles can be laid down to allow them to overlap each + * other; then we know that we must emit a refcnt btree entry wherever + * the amount of overlap changes, i.e. the emission stimulus is + * level-triggered: + * + * - --- + * -- ----- ---- --- ------ + * -- ---- ----------- ---- --------- + * -------------------------------- ----------- + * ^ ^ ^^ ^^ ^ ^^ ^^^ ^^^^ ^ ^^ ^ ^ ^ + * 2 1 23 21 3 43 234 2123 1 01 2 3 0 + * + * For our purposes, a rmap is a tuple (startblock, len, fileoff, owner). + * + * Note that in the actual refcnt btree we don't store the refcount < 2 + * cases because the bnobt tells us which blocks are free; single-use + * blocks aren't recorded in the bnobt or the refcntbt. If the rmapbt + * supports storing multiple entries covering a given block we could + * theoretically dispense with the refcntbt and simply count rmaps, but + * that's inefficient in the (hot) write path, so we'll take the cost of + * the extra tree to save time. Also there's no guarantee that rmap + * will be enabled. + * + * Given an array of rmaps sorted by physical block number, a starting + * physical block (sp), a bag to hold rmaps that cover sp, and the next + * physical block where the level changes (np), we can reconstruct the + * rt refcount btree as follows: + * + * While there are still unprocessed rmaps in the array, + * - Set sp to the physical block (pblk) of the next unprocessed rmap. + * - Add to the bag all rmaps in the array where startblock == sp. + * - Set np to the physical block where the bag size will change. This + * is the minimum of (the pblk of the next unprocessed rmap) and + * (startblock + len of each rmap in the bag). + * - Record the bag size as old_bag_size. + * + * - While the bag isn't empty, + * - Remove from the bag all rmaps where startblock + len == np. + * - Add to the bag all rmaps in the array where startblock == np. + * - If the bag size isn't old_bag_size, store the refcount entry + * (sp, np - sp, bag_size) in the refcnt btree. + * - If the bag is empty, break out of the inner loop. + * - Set old_bag_size to the bag size + * - Set sp = np. + * - Set np to the physical block where the bag size will change. 
+ * This is the minimum of (the pblk of the next unprocessed rmap) + * and (startblock + len of each rmap in the bag). + * + * Like all the other repairers, we make a list of all the refcount + * records we need, then reinitialize the rt refcount btree root and + * insert all the records. + */ + +struct xrep_rtrefc { + /* refcount extents */ + struct xfarray *refcount_records; + + /* new refcountbt information */ + struct xrep_newbt new_btree; + + /* old refcountbt blocks */ + struct xfsb_bitmap old_rtrefcountbt_blocks; + + struct xfs_scrub *sc; + + /* get_records()'s position in the rt refcount record array. */ + xfarray_idx_t array_cur; + + /* # of refcountbt blocks */ + xfs_filblks_t btblocks; +}; + +/* Set us up to repair refcount btrees. */ +int +xrep_setup_rtrefcountbt( + struct xfs_scrub *sc) +{ + char *descr; + int error; + + descr = xchk_xfile_ag_descr(sc, "rmap record bag"); + error = xrep_setup_xfbtree(sc, descr); + kfree(descr); + return error; +} + +/* Check for any obvious conflicts with this shared/CoW staging extent. */ +STATIC int +xrep_rtrefc_check_ext( + struct xfs_scrub *sc, + const struct xfs_refcount_irec *rec) +{ + xfs_rgblock_t last; + + if (xfs_rtrefcount_check_irec(sc->sr.rtg, rec) != NULL) + return -EFSCORRUPTED; + + if (xfs_rgbno_to_rtxoff(sc->mp, rec->rc_startblock) != 0) + return -EFSCORRUPTED; + + last = rec->rc_startblock + rec->rc_blockcount - 1; + if (xfs_rgbno_to_rtxoff(sc->mp, last) != sc->mp->m_sb.sb_rextsize - 1) + return -EFSCORRUPTED; + + /* Make sure this isn't free space or misaligned. */ + return xrep_require_rtext_inuse(sc, rec->rc_startblock, + rec->rc_blockcount); +} + +/* Record a reference count extent. */ +STATIC int +xrep_rtrefc_stash( + struct xrep_rtrefc *rr, + enum xfs_refc_domain domain, + xfs_rgblock_t bno, + xfs_extlen_t len, + uint64_t refcount) +{ + struct xfs_refcount_irec irec = { + .rc_startblock = bno, + .rc_blockcount = len, + .rc_refcount = refcount, + .rc_domain = domain, + }; + int error = 0; + + if (xchk_should_terminate(rr->sc, &error)) + return error; + + irec.rc_refcount = min_t(uint64_t, XFS_REFC_REFCOUNT_MAX, refcount); + + error = xrep_rtrefc_check_ext(rr->sc, &irec); + if (error) + return error; + + trace_xrep_refc_found(rtg_group(rr->sc->sr.rtg), &irec); + + return xfarray_append(rr->refcount_records, &irec); +} + +/* Record a CoW staging extent. */ +STATIC int +xrep_rtrefc_stash_cow( + struct xrep_rtrefc *rr, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + return xrep_rtrefc_stash(rr, XFS_REFC_DOMAIN_COW, bno, len, 1); +} + +/* Decide if an rmap could describe a shared extent. */ +static inline bool +xrep_rtrefc_rmap_shareable( + const struct xfs_rmap_irec *rmap) +{ + /* rt metadata are never sharable */ + if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner)) + return false; + + /* Unwritten file blocks are not shareable. */ + if (rmap->rm_flags & XFS_RMAP_UNWRITTEN) + return false; + + return true; +} + +/* Grab the next (abbreviated) rmap record from the rmapbt. */ +STATIC int +xrep_rtrefc_walk_rmaps( + struct xrep_rtrefc *rr, + struct xfs_rmap_irec *rmap, + bool *have_rec) +{ + struct xfs_btree_cur *cur = rr->sc->sr.rmap_cur; + struct xfs_mount *mp = cur->bc_mp; + int have_gt; + int error = 0; + + *have_rec = false; + + /* + * Loop through the remaining rmaps. Remember CoW staging + * extents and the refcountbt blocks from the old tree for later + * disposal. We can only share written data fork extents, so + * keep looping until we find an rmap for one. 
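The bag-based loop described above is in effect a sweep over rmap start and end points: the reference count is the number of rmaps currently "open", and a record is emitted whenever that level changes while it is at least 2, matching the level-triggered diagram in the comment. A compact standalone sketch of the same idea, using a sorted edge list in place of the kernel's rcbag (hypothetical types, plain C):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rmap { uint32_t start; uint32_t len; };

/* One +1/-1 event per rmap edge; the running sum is the refcount level. */
struct edge { uint32_t bno; int delta; };

static int edge_cmp(const void *a, const void *b)
{
	const struct edge *ea = a, *eb = b;

	if (ea->bno != eb->bno)
		return ea->bno < eb->bno ? -1 : 1;
	return ea->delta - eb->delta;
}

int main(void)
{
	const struct rmap rmaps[] = { { 0, 8 }, { 4, 8 }, { 6, 2 } };
	struct edge edges[6];
	unsigned nr = 0;

	for (unsigned i = 0; i < 3; i++) {
		edges[nr++] = (struct edge){ rmaps[i].start, +1 };
		edges[nr++] = (struct edge){ rmaps[i].start + rmaps[i].len, -1 };
	}
	qsort(edges, nr, sizeof(edges[0]), edge_cmp);

	int level = 0;
	uint32_t span_start = 0;
	for (unsigned i = 0; i < nr; i++) {
		/* Emit a record whenever the level changes while >= 2. */
		if (i && edges[i].bno != span_start && level > 1)
			printf("refcount record: start %u len %u refcount %d\n",
			       (unsigned)span_start,
			       (unsigned)(edges[i].bno - span_start), level);
		if (edges[i].bno != span_start)
			span_start = edges[i].bno;
		level += edges[i].delta;
	}
	return 0;
}

The rcbag plays the role of the edge list here, but it is backed by the in-memory btree set up in xrep_setup_rtrefcountbt() above, so it scales to arbitrarily deep sharing without keeping every rmap in a flat array.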
+ */ + do { + if (xchk_should_terminate(rr->sc, &error)) + return error; + + error = xfs_btree_increment(cur, 0, &have_gt); + if (error) + return error; + if (!have_gt) + return 0; + + error = xfs_rmap_get_rec(cur, rmap, &have_gt); + if (error) + return error; + if (XFS_IS_CORRUPT(mp, !have_gt)) { + xfs_btree_mark_sick(cur); + return -EFSCORRUPTED; + } + + if (rmap->rm_owner == XFS_RMAP_OWN_COW) { + error = xrep_rtrefc_stash_cow(rr, rmap->rm_startblock, + rmap->rm_blockcount); + if (error) + return error; + } else if (xfs_is_sb_inum(mp, rmap->rm_owner) || + (rmap->rm_flags & (XFS_RMAP_ATTR_FORK | + XFS_RMAP_BMBT_BLOCK))) { + xfs_btree_mark_sick(cur); + return -EFSCORRUPTED; + } + } while (!xrep_rtrefc_rmap_shareable(rmap)); + + *have_rec = true; + return 0; +} + +static inline uint32_t +xrep_rtrefc_encode_startblock( + const struct xfs_refcount_irec *irec) +{ + uint32_t start; + + start = irec->rc_startblock & ~XFS_REFC_COWFLAG; + if (irec->rc_domain == XFS_REFC_DOMAIN_COW) + start |= XFS_REFC_COWFLAG; + + return start; +} + +/* + * Compare two refcount records. We want to sort in order of increasing block + * number. + */ +static int +xrep_rtrefc_extent_cmp( + const void *a, + const void *b) +{ + const struct xfs_refcount_irec *ap = a; + const struct xfs_refcount_irec *bp = b; + uint32_t sa, sb; + + sa = xrep_rtrefc_encode_startblock(ap); + sb = xrep_rtrefc_encode_startblock(bp); + + if (sa > sb) + return 1; + if (sa < sb) + return -1; + return 0; +} + +/* + * Sort the refcount extents by startblock or else the btree records will be in + * the wrong order. Make sure the records do not overlap in physical space. + */ +STATIC int +xrep_rtrefc_sort_records( + struct xrep_rtrefc *rr) +{ + struct xfs_refcount_irec irec; + xfarray_idx_t cur; + enum xfs_refc_domain dom = XFS_REFC_DOMAIN_SHARED; + xfs_rgblock_t next_rgbno = 0; + int error; + + error = xfarray_sort(rr->refcount_records, xrep_rtrefc_extent_cmp, + XFARRAY_SORT_KILLABLE); + if (error) + return error; + + foreach_xfarray_idx(rr->refcount_records, cur) { + if (xchk_should_terminate(rr->sc, &error)) + return error; + + error = xfarray_load(rr->refcount_records, cur, &irec); + if (error) + return error; + + if (dom == XFS_REFC_DOMAIN_SHARED && + irec.rc_domain == XFS_REFC_DOMAIN_COW) { + dom = irec.rc_domain; + next_rgbno = 0; + } + + if (dom != irec.rc_domain) + return -EFSCORRUPTED; + if (irec.rc_startblock < next_rgbno) + return -EFSCORRUPTED; + + next_rgbno = irec.rc_startblock + irec.rc_blockcount; + } + + return error; +} + +/* Record extents that belong to the realtime refcount inode. */ +STATIC int +xrep_rtrefc_walk_rmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_rtrefc *rr = priv; + int error = 0; + + if (xchk_should_terminate(rr->sc, &error)) + return error; + + /* Skip extents which are not owned by this inode and fork. */ + if (rec->rm_owner != rr->sc->ip->i_ino) + return 0; + + error = xrep_check_ino_btree_mapping(rr->sc, rec); + if (error) + return error; + + return xfsb_bitmap_set(&rr->old_rtrefcountbt_blocks, + xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock), + rec->rm_blockcount); +} + +/* + * Walk forward through the rmap btree to collect all rmaps starting at + * @bno in @rmap_bag. These represent the file(s) that share ownership of + * the current block. Upon return, the rmap cursor points to the last record + * satisfying the startblock constraint. 
+ */ +static int +xrep_rtrefc_push_rmaps_at( + struct xrep_rtrefc *rr, + struct rcbag *rcstack, + xfs_rgblock_t bno, + struct xfs_rmap_irec *rmap, + bool *have) +{ + struct xfs_scrub *sc = rr->sc; + int have_gt; + int error; + + while (*have && rmap->rm_startblock == bno) { + error = rcbag_add(rcstack, rr->sc->tp, rmap); + if (error) + return error; + + error = xrep_rtrefc_walk_rmaps(rr, rmap, have); + if (error) + return error; + } + + error = xfs_btree_decrement(sc->sr.rmap_cur, 0, &have_gt); + if (error) + return error; + if (XFS_IS_CORRUPT(sc->mp, !have_gt)) { + xfs_btree_mark_sick(sc->sr.rmap_cur); + return -EFSCORRUPTED; + } + + return 0; +} + +/* Scan one AG for reverse mappings for the realtime refcount btree. */ +STATIC int +xrep_rtrefc_scan_ag( + struct xrep_rtrefc *rr, + struct xfs_perag *pag) +{ + struct xfs_scrub *sc = rr->sc; + int error; + + error = xrep_ag_init(sc, pag, &sc->sa); + if (error) + return error; + + error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_rtrefc_walk_rmap, rr); + xchk_ag_free(sc, &sc->sa); + return error; +} + +/* Iterate all the rmap records to generate reference count data. */ +STATIC int +xrep_rtrefc_find_refcounts( + struct xrep_rtrefc *rr) +{ + struct xfs_scrub *sc = rr->sc; + struct rcbag *rcstack; + struct xfs_perag *pag = NULL; + uint64_t old_stack_height; + xfs_rgblock_t sbno; + xfs_rgblock_t cbno; + xfs_rgblock_t nbno; + bool have; + int error; + + /* Scan for old rtrefc btree blocks. */ + while ((pag = xfs_perag_next(sc->mp, pag))) { + error = xrep_rtrefc_scan_ag(rr, pag); + if (error) { + xfs_perag_rele(pag); + return error; + } + } + + xrep_rtgroup_btcur_init(sc, &sc->sr); + + /* + * Set up a bag to store all the rmap records that we're tracking to + * generate a reference count record. If this exceeds + * XFS_REFC_REFCOUNT_MAX, we clamp rc_refcount. + */ + error = rcbag_init(sc->mp, sc->xmbtp, &rcstack); + if (error) + goto out_cur; + + /* Start the rtrmapbt cursor to the left of all records. */ + error = xfs_btree_goto_left_edge(sc->sr.rmap_cur); + if (error) + goto out_bag; + + /* Process reverse mappings into refcount data. */ + while (xfs_btree_has_more_records(sc->sr.rmap_cur)) { + struct xfs_rmap_irec rmap; + + /* Push all rmaps with pblk == sbno onto the stack */ + error = xrep_rtrefc_walk_rmaps(rr, &rmap, &have); + if (error) + goto out_bag; + if (!have) + break; + sbno = cbno = rmap.rm_startblock; + error = xrep_rtrefc_push_rmaps_at(rr, rcstack, sbno, &rmap, + &have); + if (error) + goto out_bag; + + /* Set nbno to the bno of the next refcount change */ + error = rcbag_next_edge(rcstack, sc->tp, &rmap, have, &nbno); + if (error) + goto out_bag; + + ASSERT(nbno > sbno); + old_stack_height = rcbag_count(rcstack); + + /* While stack isn't empty... 
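+		 *
+		 * For example, with single-block rt extents, two written
+		 * rmaps covering blocks [0, 10) and [5, 15) raise the bag
+		 * height to 2 at block 5 and drop it back to 1 at block
+		 * 10, so this loop emits a single refcount-2 record for
+		 * blocks [5, 10).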
*/ + while (rcbag_count(rcstack) > 0) { + /* Pop all rmaps that end at nbno */ + error = rcbag_remove_ending_at(rcstack, sc->tp, nbno); + if (error) + goto out_bag; + + /* Push array items that start at nbno */ + error = xrep_rtrefc_walk_rmaps(rr, &rmap, &have); + if (error) + goto out_bag; + if (have) { + error = xrep_rtrefc_push_rmaps_at(rr, rcstack, + nbno, &rmap, &have); + if (error) + goto out_bag; + } + + /* Emit refcount if necessary */ + ASSERT(nbno > cbno); + if (rcbag_count(rcstack) != old_stack_height) { + if (old_stack_height > 1) { + error = xrep_rtrefc_stash(rr, + XFS_REFC_DOMAIN_SHARED, + cbno, nbno - cbno, + old_stack_height); + if (error) + goto out_bag; + } + cbno = nbno; + } + + /* Stack empty, go find the next rmap */ + if (rcbag_count(rcstack) == 0) + break; + old_stack_height = rcbag_count(rcstack); + sbno = nbno; + + /* Set nbno to the bno of the next refcount change */ + error = rcbag_next_edge(rcstack, sc->tp, &rmap, have, + &nbno); + if (error) + goto out_bag; + + ASSERT(nbno > sbno); + } + } + + ASSERT(rcbag_count(rcstack) == 0); +out_bag: + rcbag_free(&rcstack); +out_cur: + xchk_rtgroup_btcur_free(&sc->sr); + return error; +} + +/* Retrieve refcountbt data for bulk load. */ +STATIC int +xrep_rtrefc_get_records( + struct xfs_btree_cur *cur, + unsigned int idx, + struct xfs_btree_block *block, + unsigned int nr_wanted, + void *priv) +{ + struct xrep_rtrefc *rr = priv; + union xfs_btree_rec *block_rec; + unsigned int loaded; + int error; + + for (loaded = 0; loaded < nr_wanted; loaded++, idx++) { + error = xfarray_load(rr->refcount_records, rr->array_cur++, + &cur->bc_rec.rc); + if (error) + return error; + + block_rec = xfs_btree_rec_addr(cur, idx, block); + cur->bc_ops->init_rec_from_cur(cur, block_rec); + } + + return loaded; +} + +/* Feed one of the new btree blocks to the bulk loader. */ +STATIC int +xrep_rtrefc_claim_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + void *priv) +{ + struct xrep_rtrefc *rr = priv; + + return xrep_newbt_claim_block(cur, &rr->new_btree, ptr); +} + +/* Figure out how much space we need to create the incore btree root block. */ +STATIC size_t +xrep_rtrefc_iroot_size( + struct xfs_btree_cur *cur, + unsigned int level, + unsigned int nr_this_level, + void *priv) +{ + return xfs_rtrefcount_broot_space_calc(cur->bc_mp, level, + nr_this_level); +} + +/* + * Use the collected refcount information to stage a new rt refcount btree. If + * this is successful we'll return with the new btree root information logged + * to the repair transaction but not yet committed. + */ +STATIC int +xrep_rtrefc_build_new_tree( + struct xrep_rtrefc *rr) +{ + struct xfs_scrub *sc = rr->sc; + struct xfs_rtgroup *rtg = sc->sr.rtg; + struct xfs_btree_cur *refc_cur; + int error; + + error = xrep_rtrefc_sort_records(rr); + if (error) + return error; + + /* + * Prepare to construct the new btree by reserving disk space for the + * new btree and setting up all the accounting information we'll need + * to root the new btree while it's under construction and before we + * attach it to the realtime refcount inode. + */ + error = xrep_newbt_init_metadir_inode(&rr->new_btree, sc); + if (error) + return error; + + rr->new_btree.bload.get_records = xrep_rtrefc_get_records; + rr->new_btree.bload.claim_block = xrep_rtrefc_claim_block; + rr->new_btree.bload.iroot_size = xrep_rtrefc_iroot_size; + + refc_cur = xfs_rtrefcountbt_init_cursor(NULL, rtg); + xfs_btree_stage_ifakeroot(refc_cur, &rr->new_btree.ifake); + + /* Compute how many blocks we'll need. 
*/ + error = xfs_btree_bload_compute_geometry(refc_cur, &rr->new_btree.bload, + xfarray_length(rr->refcount_records)); + if (error) + goto err_cur; + + /* Last chance to abort before we start committing fixes. */ + if (xchk_should_terminate(sc, &error)) + goto err_cur; + + /* + * Guess how many blocks we're going to need to rebuild an entire + * rtrefcountbt from the number of extents we found, and pump up our + * transaction to have sufficient block reservation. We're allowed + * to exceed quota to repair inconsistent metadata, though this is + * unlikely. + */ + error = xfs_trans_reserve_more_inode(sc->tp, rtg_refcount(rtg), + rr->new_btree.bload.nr_blocks, 0, true); + if (error) + goto err_cur; + + /* Reserve the space we'll need for the new btree. */ + error = xrep_newbt_alloc_blocks(&rr->new_btree, + rr->new_btree.bload.nr_blocks); + if (error) + goto err_cur; + + /* Add all observed refcount records. */ + rr->new_btree.ifake.if_fork->if_format = XFS_DINODE_FMT_META_BTREE; + rr->array_cur = XFARRAY_CURSOR_INIT; + error = xfs_btree_bload(refc_cur, &rr->new_btree.bload, rr); + if (error) + goto err_cur; + + /* + * Install the new rtrefc btree in the inode. After this point the old + * btree is no longer accessible, the new tree is live, and we can + * delete the cursor. + */ + xfs_rtrefcountbt_commit_staged_btree(refc_cur, sc->tp); + xrep_inode_set_nblocks(rr->sc, rr->new_btree.ifake.if_blocks); + xfs_btree_del_cursor(refc_cur, 0); + + /* Dispose of any unused blocks and the accounting information. */ + error = xrep_newbt_commit(&rr->new_btree); + if (error) + return error; + + return xrep_roll_trans(sc); +err_cur: + xfs_btree_del_cursor(refc_cur, error); + xrep_newbt_cancel(&rr->new_btree); + return error; +} + +/* + * Now that we've logged the roots of the new btrees, invalidate all of the + * old blocks and free them. + */ +STATIC int +xrep_rtrefc_remove_old_tree( + struct xrep_rtrefc *rr) +{ + int error; + + /* + * Free all the extents that were allocated to the former rtrefcountbt + * and aren't cross-linked with something else. + */ + error = xrep_reap_metadir_fsblocks(rr->sc, + &rr->old_rtrefcountbt_blocks); + if (error) + return error; + + /* + * Ensure the proper reservation for the rtrefcount inode so that we + * don't fail to expand the btree. + */ + return xrep_reset_metafile_resv(rr->sc); +} + +/* Rebuild the rt refcount btree. */ +int +xrep_rtrefcountbt( + struct xfs_scrub *sc) +{ + struct xrep_rtrefc *rr; + struct xfs_mount *mp = sc->mp; + char *descr; + int error; + + /* We require the rmapbt to rebuild anything. */ + if (!xfs_has_rtrmapbt(mp)) + return -EOPNOTSUPP; + + /* Make sure any problems with the fork are fixed. */ + error = xrep_metadata_inode_forks(sc); + if (error) + return error; + + rr = kzalloc(sizeof(struct xrep_rtrefc), XCHK_GFP_FLAGS); + if (!rr) + return -ENOMEM; + rr->sc = sc; + + /* Set up enough storage to handle one refcount record per rt extent. */ + descr = xchk_xfile_ag_descr(sc, "reference count records"); + error = xfarray_create(descr, mp->m_sb.sb_rextents, + sizeof(struct xfs_refcount_irec), + &rr->refcount_records); + kfree(descr); + if (error) + goto out_rr; + + /* Collect all reference counts. */ + xfsb_bitmap_init(&rr->old_rtrefcountbt_blocks); + error = xrep_rtrefc_find_refcounts(rr); + if (error) + goto out_bitmap; + + xfs_trans_ijoin(sc->tp, sc->ip, 0); + + /* Rebuild the refcount information. */ + error = xrep_rtrefc_build_new_tree(rr); + if (error) + goto out_bitmap; + + /* Kill the old tree. 
*/ + error = xrep_rtrefc_remove_old_tree(rr); + if (error) + goto out_bitmap; + +out_bitmap: + xfsb_bitmap_destroy(&rr->old_rtrefcountbt_blocks); + xfarray_destroy(rr->refcount_records); +out_rr: + kfree(rr); + return error; +} diff --git a/fs/xfs/scrub/rtrmap.c b/fs/xfs/scrub/rtrmap.c new file mode 100644 index 000000000000..12989fe80e8b --- /dev/null +++ b/fs/xfs/scrub/rtrmap.c @@ -0,0 +1,323 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2018-2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_btree.h" +#include "xfs_bit.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_inode.h" +#include "xfs_rtalloc.h" +#include "xfs_rtgroup.h" +#include "xfs_metafile.h" +#include "xfs_refcount.h" +#include "scrub/xfs_scrub.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/trace.h" +#include "scrub/repair.h" + +/* Set us up with the realtime metadata locked. */ +int +xchk_setup_rtrmapbt( + struct xfs_scrub *sc) +{ + int error; + + if (xchk_need_intent_drain(sc)) + xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN); + + if (xchk_could_repair(sc)) { + error = xrep_setup_rtrmapbt(sc); + if (error) + return error; + } + + error = xchk_rtgroup_init(sc, sc->sm->sm_agno, &sc->sr); + if (error) + return error; + + error = xchk_setup_rt(sc); + if (error) + return error; + + error = xchk_install_live_inode(sc, rtg_rmap(sc->sr.rtg)); + if (error) + return error; + + return xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL); +} + +/* Realtime reverse mapping. */ + +struct xchk_rtrmap { + /* + * The furthest-reaching of the rmapbt records that we've already + * processed. This enables us to detect overlapping records for space + * allocations that cannot be shared. + */ + struct xfs_rmap_irec overlap_rec; + + /* + * The previous rmapbt record, so that we can check for two records + * that could be one. + */ + struct xfs_rmap_irec prev_rec; +}; + +static inline bool +xchk_rtrmapbt_is_shareable( + struct xfs_scrub *sc, + const struct xfs_rmap_irec *irec) +{ + if (!xfs_has_rtreflink(sc->mp)) + return false; + if (irec->rm_flags & XFS_RMAP_UNWRITTEN) + return false; + return true; +} + +/* Flag failures for records that overlap but cannot. */ +STATIC void +xchk_rtrmapbt_check_overlapping( + struct xchk_btree *bs, + struct xchk_rtrmap *cr, + const struct xfs_rmap_irec *irec) +{ + xfs_rtblock_t pnext, inext; + + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + /* No previous record? */ + if (cr->overlap_rec.rm_blockcount == 0) + goto set_prev; + + /* Do overlap_rec and irec overlap? */ + pnext = cr->overlap_rec.rm_startblock + cr->overlap_rec.rm_blockcount; + if (pnext <= irec->rm_startblock) + goto set_prev; + + /* Overlap is only allowed if both records are data fork mappings. */ + if (!xchk_rtrmapbt_is_shareable(bs->sc, &cr->overlap_rec) || + !xchk_rtrmapbt_is_shareable(bs->sc, irec)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + /* Save whichever rmap record extends furthest. */ + inext = irec->rm_startblock + irec->rm_blockcount; + if (pnext > inext) + return; + +set_prev: + memcpy(&cr->overlap_rec, irec, sizeof(struct xfs_rmap_irec)); +} + +/* Decide if two reverse-mapping records can be merged. 
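+ *
+ * Adjacent records that pass all of these checks should have been a single
+ * record, so the caller flags the btree as corrupt when this returns true.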
*/ +static inline bool +xchk_rtrmap_mergeable( + struct xchk_rtrmap *cr, + const struct xfs_rmap_irec *r2) +{ + const struct xfs_rmap_irec *r1 = &cr->prev_rec; + + /* Ignore if prev_rec is not yet initialized. */ + if (cr->prev_rec.rm_blockcount == 0) + return false; + + if (r1->rm_owner != r2->rm_owner) + return false; + if (r1->rm_startblock + r1->rm_blockcount != r2->rm_startblock) + return false; + if ((unsigned long long)r1->rm_blockcount + r2->rm_blockcount > + XFS_RMAP_LEN_MAX) + return false; + if (r1->rm_flags != r2->rm_flags) + return false; + return r1->rm_offset + r1->rm_blockcount == r2->rm_offset; +} + +/* Flag failures for records that could be merged. */ +STATIC void +xchk_rtrmapbt_check_mergeable( + struct xchk_btree *bs, + struct xchk_rtrmap *cr, + const struct xfs_rmap_irec *irec) +{ + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + if (xchk_rtrmap_mergeable(cr, irec)) + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + + memcpy(&cr->prev_rec, irec, sizeof(struct xfs_rmap_irec)); +} + +/* Cross-reference a rmap against the refcount btree. */ +STATIC void +xchk_rtrmapbt_xref_rtrefc( + struct xfs_scrub *sc, + struct xfs_rmap_irec *irec) +{ + xfs_rgblock_t fbno; + xfs_extlen_t flen; + bool is_inode; + bool is_bmbt; + bool is_attr; + bool is_unwritten; + int error; + + if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm)) + return; + + is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner); + is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK; + is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK; + is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN; + + /* If this is shared, must be a data fork extent. */ + error = xfs_refcount_find_shared(sc->sr.refc_cur, irec->rm_startblock, + irec->rm_blockcount, &fbno, &flen, false); + if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur)) + return; + if (flen != 0 && (!is_inode || is_attr || is_bmbt || is_unwritten)) + xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0); +} + +/* Cross-reference with other metadata. */ +STATIC void +xchk_rtrmapbt_xref( + struct xfs_scrub *sc, + struct xfs_rmap_irec *irec) +{ + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return; + + xchk_xref_is_used_rt_space(sc, + xfs_rgbno_to_rtb(sc->sr.rtg, irec->rm_startblock), + irec->rm_blockcount); + if (irec->rm_owner == XFS_RMAP_OWN_COW) + xchk_xref_is_cow_staging(sc, irec->rm_startblock, + irec->rm_blockcount); + else + xchk_rtrmapbt_xref_rtrefc(sc, irec); +} + +/* Scrub a realtime rmapbt record. */ +STATIC int +xchk_rtrmapbt_rec( + struct xchk_btree *bs, + const union xfs_btree_rec *rec) +{ + struct xchk_rtrmap *cr = bs->private; + struct xfs_rmap_irec irec; + + if (xfs_rmap_btrec_to_irec(rec, &irec) != NULL || + xfs_rtrmap_check_irec(to_rtg(bs->cur->bc_group), &irec) != NULL) { + xchk_btree_set_corrupt(bs->sc, bs->cur, 0); + return 0; + } + + if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) + return 0; + + xchk_rtrmapbt_check_mergeable(bs, cr, &irec); + xchk_rtrmapbt_check_overlapping(bs, cr, &irec); + xchk_rtrmapbt_xref(bs->sc, &irec); + return 0; +} + +/* Scrub the realtime rmap btree. 
*/ +int +xchk_rtrmapbt( + struct xfs_scrub *sc) +{ + struct xfs_inode *ip = rtg_rmap(sc->sr.rtg); + struct xfs_owner_info oinfo; + struct xchk_rtrmap cr = { }; + int error; + + error = xchk_metadata_inode_forks(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + + xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, XFS_DATA_FORK); + return xchk_btree(sc, sc->sr.rmap_cur, xchk_rtrmapbt_rec, &oinfo, &cr); +} + +/* xref check that the extent has no realtime reverse mapping at all */ +void +xchk_xref_has_no_rt_owner( + struct xfs_scrub *sc, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + enum xbtree_recpacking outcome; + int error; + + if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome); + if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur)) + return; + if (outcome != XBTREE_RECPACKING_EMPTY) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); +} + +/* xref check that the extent is completely mapped */ +void +xchk_xref_has_rt_owner( + struct xfs_scrub *sc, + xfs_rgblock_t bno, + xfs_extlen_t len) +{ + enum xbtree_recpacking outcome; + int error; + + if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_rmap_has_records(sc->sr.rmap_cur, bno, len, &outcome); + if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur)) + return; + if (outcome != XBTREE_RECPACKING_FULL) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); +} + +/* xref check that the extent is only owned by a given owner */ +void +xchk_xref_is_only_rt_owned_by( + struct xfs_scrub *sc, + xfs_agblock_t bno, + xfs_extlen_t len, + const struct xfs_owner_info *oinfo) +{ + struct xfs_rmap_matches res; + int error; + + if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm)) + return; + + error = xfs_rmap_count_owners(sc->sr.rmap_cur, bno, len, oinfo, &res); + if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur)) + return; + if (res.matches != 1) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); + if (res.bad_non_owner_matches) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); + if (res.non_owner_matches) + xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0); +} diff --git a/fs/xfs/scrub/rtrmap_repair.c b/fs/xfs/scrub/rtrmap_repair.c new file mode 100644 index 000000000000..f2fdd7a9fc24 --- /dev/null +++ b/fs/xfs/scrub/rtrmap_repair.c @@ -0,0 +1,1006 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2020-2024 Oracle. All Rights Reserved. + * Author: Darrick J. 
Wong <djwong@kernel.org> + */ +#include "xfs.h" +#include "xfs_fs.h" +#include "xfs_shared.h" +#include "xfs_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" +#include "xfs_defer.h" +#include "xfs_btree.h" +#include "xfs_btree_staging.h" +#include "xfs_buf_mem.h" +#include "xfs_btree_mem.h" +#include "xfs_bit.h" +#include "xfs_log_format.h" +#include "xfs_trans.h" +#include "xfs_sb.h" +#include "xfs_alloc.h" +#include "xfs_rmap.h" +#include "xfs_rmap_btree.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_inode.h" +#include "xfs_icache.h" +#include "xfs_bmap.h" +#include "xfs_bmap_btree.h" +#include "xfs_quota.h" +#include "xfs_rtalloc.h" +#include "xfs_ag.h" +#include "xfs_rtgroup.h" +#include "xfs_refcount.h" +#include "scrub/xfs_scrub.h" +#include "scrub/scrub.h" +#include "scrub/common.h" +#include "scrub/btree.h" +#include "scrub/trace.h" +#include "scrub/repair.h" +#include "scrub/bitmap.h" +#include "scrub/fsb_bitmap.h" +#include "scrub/rgb_bitmap.h" +#include "scrub/xfile.h" +#include "scrub/xfarray.h" +#include "scrub/iscan.h" +#include "scrub/newbt.h" +#include "scrub/reap.h" + +/* + * Realtime Reverse Mapping Btree Repair + * ===================================== + * + * This isn't quite as difficult as repairing the rmap btree on the data + * device, since we only store the data fork extents of realtime files on the + * realtime device. We still have to freeze the filesystem and stop the + * background threads like we do for the rmap repair, but we only have to scan + * realtime inodes. + * + * Collecting entries for the new realtime rmap btree is easy -- all we have + * to do is generate rtrmap entries from the data fork mappings of all realtime + * files in the filesystem. We then scan the rmap btrees of the data device + * looking for extents belonging to the old btree and note them in a bitmap. + * + * To rebuild the realtime rmap btree, we bulk-load the collected mappings into + * a new btree cursor and atomically swap that into the realtime inode. Then + * we can free the blocks from the old btree. + * + * We use the 'xrep_rtrmap' prefix for all the rmap functions. + */ + +/* Context for collecting rmaps */ +struct xrep_rtrmap { + /* new rtrmapbt information */ + struct xrep_newbt new_btree; + + /* lock for the xfbtree and xfile */ + struct mutex lock; + + /* rmap records generated from primary metadata */ + struct xfbtree rtrmap_btree; + + struct xfs_scrub *sc; + + /* bitmap of old rtrmapbt blocks */ + struct xfsb_bitmap old_rtrmapbt_blocks; + + /* Hooks into rtrmap update code. */ + struct xfs_rmap_hook rhook; + + /* inode scan cursor */ + struct xchk_iscan iscan; + + /* in-memory btree cursor for the ->get_blocks walk */ + struct xfs_btree_cur *mcur; + + /* Number of records we're staging in the new btree. */ + uint64_t nr_records; +}; + +/* Set us up to repair rt reverse mapping btrees. */ +int +xrep_setup_rtrmapbt( + struct xfs_scrub *sc) +{ + struct xrep_rtrmap *rr; + char *descr; + int error; + + xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP); + + descr = xchk_xfile_rtgroup_descr(sc, "reverse mapping records"); + error = xrep_setup_xfbtree(sc, descr); + kfree(descr); + if (error) + return error; + + rr = kzalloc(sizeof(struct xrep_rtrmap), XCHK_GFP_FLAGS); + if (!rr) + return -ENOMEM; + + rr->sc = sc; + sc->buf = rr; + return 0; +} + +/* Make sure there's nothing funny about this mapping. 
*/ +STATIC int +xrep_rtrmap_check_mapping( + struct xfs_scrub *sc, + const struct xfs_rmap_irec *rec) +{ + if (xfs_rtrmap_check_irec(sc->sr.rtg, rec) != NULL) + return -EFSCORRUPTED; + + /* Make sure this isn't free space. */ + return xrep_require_rtext_inuse(sc, rec->rm_startblock, + rec->rm_blockcount); +} + +/* Store a reverse-mapping record. */ +static inline int +xrep_rtrmap_stash( + struct xrep_rtrmap *rr, + xfs_rgblock_t startblock, + xfs_extlen_t blockcount, + uint64_t owner, + uint64_t offset, + unsigned int flags) +{ + struct xfs_rmap_irec rmap = { + .rm_startblock = startblock, + .rm_blockcount = blockcount, + .rm_owner = owner, + .rm_offset = offset, + .rm_flags = flags, + }; + struct xfs_scrub *sc = rr->sc; + struct xfs_btree_cur *mcur; + int error = 0; + + if (xchk_should_terminate(sc, &error)) + return error; + + if (xchk_iscan_aborted(&rr->iscan)) + return -EFSCORRUPTED; + + trace_xrep_rtrmap_found(sc->mp, &rmap); + + /* Add entry to in-memory btree. */ + mutex_lock(&rr->lock); + mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, sc->tp, &rr->rtrmap_btree); + error = xfs_rmap_map_raw(mcur, &rmap); + xfs_btree_del_cursor(mcur, error); + if (error) + goto out_cancel; + + error = xfbtree_trans_commit(&rr->rtrmap_btree, sc->tp); + if (error) + goto out_abort; + + mutex_unlock(&rr->lock); + return 0; + +out_cancel: + xfbtree_trans_cancel(&rr->rtrmap_btree, sc->tp); +out_abort: + xchk_iscan_abort(&rr->iscan); + mutex_unlock(&rr->lock); + return error; +} + +/* Finding all file and bmbt extents. */ + +/* Context for accumulating rmaps for an inode fork. */ +struct xrep_rtrmap_ifork { + /* + * Accumulate rmap data here to turn multiple adjacent bmaps into a + * single rmap. + */ + struct xfs_rmap_irec accum; + + struct xrep_rtrmap *rr; +}; + +/* Stash an rmap that we accumulated while walking an inode fork. */ +STATIC int +xrep_rtrmap_stash_accumulated( + struct xrep_rtrmap_ifork *rf) +{ + if (rf->accum.rm_blockcount == 0) + return 0; + + return xrep_rtrmap_stash(rf->rr, rf->accum.rm_startblock, + rf->accum.rm_blockcount, rf->accum.rm_owner, + rf->accum.rm_offset, rf->accum.rm_flags); +} + +/* Accumulate a bmbt record. */ +STATIC int +xrep_rtrmap_visit_bmbt( + struct xfs_btree_cur *cur, + struct xfs_bmbt_irec *rec, + void *priv) +{ + struct xrep_rtrmap_ifork *rf = priv; + struct xfs_rmap_irec *accum = &rf->accum; + struct xfs_mount *mp = rf->rr->sc->mp; + xfs_rgblock_t rgbno; + unsigned int rmap_flags = 0; + int error; + + if (xfs_rtb_to_rgno(mp, rec->br_startblock) != + rtg_rgno(rf->rr->sc->sr.rtg)) + return 0; + + if (rec->br_state == XFS_EXT_UNWRITTEN) + rmap_flags |= XFS_RMAP_UNWRITTEN; + + /* If this bmap is adjacent to the previous one, just add it. */ + rgbno = xfs_rtb_to_rgbno(mp, rec->br_startblock); + if (accum->rm_blockcount > 0 && + rec->br_startoff == accum->rm_offset + accum->rm_blockcount && + rgbno == accum->rm_startblock + accum->rm_blockcount && + rmap_flags == accum->rm_flags) { + accum->rm_blockcount += rec->br_blockcount; + return 0; + } + + /* Otherwise stash the old rmap and start accumulating a new one. */ + error = xrep_rtrmap_stash_accumulated(rf); + if (error) + return error; + + accum->rm_startblock = rgbno; + accum->rm_blockcount = rec->br_blockcount; + accum->rm_offset = rec->br_startoff; + accum->rm_flags = rmap_flags; + return 0; +} + +/* + * Iterate the block mapping btree to collect rmap records for anything in this + * fork that maps to the rt volume. Sets @mappings_done to true if we've + * scanned the block mappings in this fork. 
+ */ +STATIC int +xrep_rtrmap_scan_bmbt( + struct xrep_rtrmap_ifork *rf, + struct xfs_inode *ip, + bool *mappings_done) +{ + struct xrep_rtrmap *rr = rf->rr; + struct xfs_btree_cur *cur; + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + int error = 0; + + *mappings_done = false; + + /* + * If the incore extent cache is already loaded, we'll just use the + * incore extent scanner to record mappings. Don't bother walking the + * ondisk extent tree. + */ + if (!xfs_need_iread_extents(ifp)) + return 0; + + /* Accumulate all the mappings in the bmap btree. */ + cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, XFS_DATA_FORK); + error = xfs_bmap_query_all(cur, xrep_rtrmap_visit_bmbt, rf); + xfs_btree_del_cursor(cur, error); + if (error) + return error; + + /* Stash any remaining accumulated rmaps and exit. */ + *mappings_done = true; + return xrep_rtrmap_stash_accumulated(rf); +} + +/* + * Iterate the in-core extent cache to collect rmap records for anything in + * this fork that matches the AG. + */ +STATIC int +xrep_rtrmap_scan_iext( + struct xrep_rtrmap_ifork *rf, + struct xfs_ifork *ifp) +{ + struct xfs_bmbt_irec rec; + struct xfs_iext_cursor icur; + int error; + + for_each_xfs_iext(ifp, &icur, &rec) { + if (isnullstartblock(rec.br_startblock)) + continue; + error = xrep_rtrmap_visit_bmbt(NULL, &rec, rf); + if (error) + return error; + } + + return xrep_rtrmap_stash_accumulated(rf); +} + +/* Find all the extents on the realtime device mapped by an inode fork. */ +STATIC int +xrep_rtrmap_scan_dfork( + struct xrep_rtrmap *rr, + struct xfs_inode *ip) +{ + struct xrep_rtrmap_ifork rf = { + .accum = { .rm_owner = ip->i_ino, }, + .rr = rr, + }; + struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); + int error = 0; + + if (ifp->if_format == XFS_DINODE_FMT_BTREE) { + bool mappings_done; + + /* + * Scan the bmbt for mappings. If the incore extent tree is + * loaded, we want to scan the cached mappings since that's + * faster when the extent counts are very high. + */ + error = xrep_rtrmap_scan_bmbt(&rf, ip, &mappings_done); + if (error || mappings_done) + return error; + } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) { + /* realtime data forks should only be extents or btree */ + return -EFSCORRUPTED; + } + + /* Scan incore extent cache. */ + return xrep_rtrmap_scan_iext(&rf, ifp); +} + +/* Record reverse mappings for a file. */ +STATIC int +xrep_rtrmap_scan_inode( + struct xrep_rtrmap *rr, + struct xfs_inode *ip) +{ + unsigned int lock_mode; + int error = 0; + + /* Skip the rt rmap btree inode. */ + if (rr->sc->ip == ip) + return 0; + + lock_mode = xfs_ilock_data_map_shared(ip); + + /* Check the data fork if it's on the realtime device. */ + if (XFS_IS_REALTIME_INODE(ip)) { + error = xrep_rtrmap_scan_dfork(rr, ip); + if (error) + goto out_unlock; + } + + xchk_iscan_mark_visited(&rr->iscan, ip); +out_unlock: + xfs_iunlock(ip, lock_mode); + return error; +} + +/* Record extents that belong to the realtime rmap inode. */ +STATIC int +xrep_rtrmap_walk_rmap( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_rtrmap *rr = priv; + int error = 0; + + if (xchk_should_terminate(rr->sc, &error)) + return error; + + /* Skip extents which are not owned by this inode and fork. 
*/ + if (rec->rm_owner != rr->sc->ip->i_ino) + return 0; + + error = xrep_check_ino_btree_mapping(rr->sc, rec); + if (error) + return error; + + return xfsb_bitmap_set(&rr->old_rtrmapbt_blocks, + xfs_gbno_to_fsb(cur->bc_group, rec->rm_startblock), + rec->rm_blockcount); +} + +/* Scan one AG for reverse mappings for the realtime rmap btree. */ +STATIC int +xrep_rtrmap_scan_ag( + struct xrep_rtrmap *rr, + struct xfs_perag *pag) +{ + struct xfs_scrub *sc = rr->sc; + int error; + + error = xrep_ag_init(sc, pag, &sc->sa); + if (error) + return error; + + error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_rtrmap_walk_rmap, rr); + xchk_ag_free(sc, &sc->sa); + return error; +} + +struct xrep_rtrmap_stash_run { + struct xrep_rtrmap *rr; + uint64_t owner; +}; + +static int +xrep_rtrmap_stash_run( + uint32_t start, + uint32_t len, + void *priv) +{ + struct xrep_rtrmap_stash_run *rsr = priv; + struct xrep_rtrmap *rr = rsr->rr; + xfs_rgblock_t rgbno = start; + + return xrep_rtrmap_stash(rr, rgbno, len, rsr->owner, 0, 0); +} + +/* + * Emit rmaps for every extent of bits set in the bitmap. Caller must ensure + * that the ranges are in units of FS blocks. + */ +STATIC int +xrep_rtrmap_stash_bitmap( + struct xrep_rtrmap *rr, + struct xrgb_bitmap *bitmap, + const struct xfs_owner_info *oinfo) +{ + struct xrep_rtrmap_stash_run rsr = { + .rr = rr, + .owner = oinfo->oi_owner, + }; + + return xrgb_bitmap_walk(bitmap, xrep_rtrmap_stash_run, &rsr); +} + +/* Record a CoW staging extent. */ +STATIC int +xrep_rtrmap_walk_cowblocks( + struct xfs_btree_cur *cur, + const struct xfs_refcount_irec *irec, + void *priv) +{ + struct xrgb_bitmap *bitmap = priv; + + if (!xfs_refcount_check_domain(irec) || + irec->rc_domain != XFS_REFC_DOMAIN_COW) + return -EFSCORRUPTED; + + return xrgb_bitmap_set(bitmap, irec->rc_startblock, + irec->rc_blockcount); +} + +/* + * Collect rmaps for the blocks containing the refcount btree, and all CoW + * staging extents. + */ +STATIC int +xrep_rtrmap_find_refcount_rmaps( + struct xrep_rtrmap *rr) +{ + struct xrgb_bitmap cow_blocks; /* COWBIT */ + struct xfs_refcount_irec low = { + .rc_startblock = 0, + .rc_domain = XFS_REFC_DOMAIN_COW, + }; + struct xfs_refcount_irec high = { + .rc_startblock = -1U, + .rc_domain = XFS_REFC_DOMAIN_COW, + }; + struct xfs_scrub *sc = rr->sc; + int error; + + if (!xfs_has_rtreflink(sc->mp)) + return 0; + + xrgb_bitmap_init(&cow_blocks); + + /* Collect rmaps for CoW staging extents. */ + error = xfs_refcount_query_range(sc->sr.refc_cur, &low, &high, + xrep_rtrmap_walk_cowblocks, &cow_blocks); + if (error) + goto out_bitmap; + + /* Generate rmaps for everything. */ + error = xrep_rtrmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW); + if (error) + goto out_bitmap; + +out_bitmap: + xrgb_bitmap_destroy(&cow_blocks); + return error; +} + +/* Count and check all collected records. */ +STATIC int +xrep_rtrmap_check_record( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xrep_rtrmap *rr = priv; + int error; + + error = xrep_rtrmap_check_mapping(rr->sc, rec); + if (error) + return error; + + rr->nr_records++; + return 0; +} + +/* Generate all the reverse-mappings for the realtime device. 
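+ *
+ * This stashes a static rmap for the realtime superblock (if there is one
+ * in this group), rmaps for the CoW staging extents tracked by the rt
+ * refcount btree, and rmaps for the data fork of every realtime file.  It
+ * also records, via the per-AG rmap btrees, which blocks belonged to the
+ * old rtrmapbt so they can be reaped later, then counts and checks the
+ * records staged in the in-memory btree.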
*/ +STATIC int +xrep_rtrmap_find_rmaps( + struct xrep_rtrmap *rr) +{ + struct xfs_scrub *sc = rr->sc; + struct xfs_perag *pag = NULL; + struct xfs_inode *ip; + struct xfs_btree_cur *mcur; + int error; + + /* Generate rmaps for the realtime superblock */ + if (xfs_has_rtsb(sc->mp) && rtg_rgno(rr->sc->sr.rtg) == 0) { + error = xrep_rtrmap_stash(rr, 0, sc->mp->m_sb.sb_rextsize, + XFS_RMAP_OWN_FS, 0, 0); + if (error) + return error; + } + + /* Find CoW staging extents. */ + xrep_rtgroup_btcur_init(sc, &sc->sr); + error = xrep_rtrmap_find_refcount_rmaps(rr); + xchk_rtgroup_btcur_free(&sc->sr); + if (error) + return error; + + /* + * Set up for a potentially lengthy filesystem scan by reducing our + * transaction resource usage for the duration. Specifically: + * + * Unlock the realtime metadata inodes and cancel the transaction to + * release the log grant space while we scan the filesystem. + * + * Create a new empty transaction to eliminate the possibility of the + * inode scan deadlocking on cyclical metadata. + * + * We pass the empty transaction to the file scanning function to avoid + * repeatedly cycling empty transactions. This can be done even though + * we take the IOLOCK to quiesce the file because empty transactions + * do not take sb_internal. + */ + xchk_trans_cancel(sc); + xchk_rtgroup_unlock(&sc->sr); + error = xchk_trans_alloc_empty(sc); + if (error) + return error; + + while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) { + error = xrep_rtrmap_scan_inode(rr, ip); + xchk_irele(sc, ip); + if (error) + break; + + if (xchk_should_terminate(sc, &error)) + break; + } + xchk_iscan_iter_finish(&rr->iscan); + if (error) + return error; + + /* + * Switch out for a real transaction and lock the RT metadata in + * preparation for building a new tree. + */ + xchk_trans_cancel(sc); + error = xchk_setup_rt(sc); + if (error) + return error; + error = xchk_rtgroup_lock(sc, &sc->sr, XCHK_RTGLOCK_ALL); + if (error) + return error; + + /* + * If a hook failed to update the in-memory btree, we lack the data to + * continue the repair. + */ + if (xchk_iscan_aborted(&rr->iscan)) + return -EFSCORRUPTED; + + /* Scan for old rtrmap blocks. */ + while ((pag = xfs_perag_next(sc->mp, pag))) { + error = xrep_rtrmap_scan_ag(rr, pag); + if (error) { + xfs_perag_rele(pag); + return error; + } + } + + /* + * Now that we have everything locked again, we need to count the + * number of rmap records stashed in the btree. This should reflect + * all actively-owned rt files in the filesystem. At the same time, + * check all our records before we start building a new btree, which + * requires the rtbitmap lock. + */ + mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, NULL, &rr->rtrmap_btree); + rr->nr_records = 0; + error = xfs_rmap_query_all(mcur, xrep_rtrmap_check_record, rr); + xfs_btree_del_cursor(mcur, error); + + return error; +} + +/* Building the new rtrmap btree. */ + +/* Retrieve rtrmapbt data for bulk load. 
*/ +STATIC int +xrep_rtrmap_get_records( + struct xfs_btree_cur *cur, + unsigned int idx, + struct xfs_btree_block *block, + unsigned int nr_wanted, + void *priv) +{ + struct xrep_rtrmap *rr = priv; + union xfs_btree_rec *block_rec; + unsigned int loaded; + int error; + + for (loaded = 0; loaded < nr_wanted; loaded++, idx++) { + int stat = 0; + + error = xfs_btree_increment(rr->mcur, 0, &stat); + if (error) + return error; + if (!stat) + return -EFSCORRUPTED; + + error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat); + if (error) + return error; + if (!stat) + return -EFSCORRUPTED; + + block_rec = xfs_btree_rec_addr(cur, idx, block); + cur->bc_ops->init_rec_from_cur(cur, block_rec); + } + + return loaded; +} + +/* Feed one of the new btree blocks to the bulk loader. */ +STATIC int +xrep_rtrmap_claim_block( + struct xfs_btree_cur *cur, + union xfs_btree_ptr *ptr, + void *priv) +{ + struct xrep_rtrmap *rr = priv; + + return xrep_newbt_claim_block(cur, &rr->new_btree, ptr); +} + +/* Figure out how much space we need to create the incore btree root block. */ +STATIC size_t +xrep_rtrmap_iroot_size( + struct xfs_btree_cur *cur, + unsigned int level, + unsigned int nr_this_level, + void *priv) +{ + return xfs_rtrmap_broot_space_calc(cur->bc_mp, level, nr_this_level); +} + +/* + * Use the collected rmap information to stage a new rmap btree. If this is + * successful we'll return with the new btree root information logged to the + * repair transaction but not yet committed. This implements section (III) + * above. + */ +STATIC int +xrep_rtrmap_build_new_tree( + struct xrep_rtrmap *rr) +{ + struct xfs_scrub *sc = rr->sc; + struct xfs_rtgroup *rtg = sc->sr.rtg; + struct xfs_btree_cur *rmap_cur; + int error; + + /* + * Prepare to construct the new btree by reserving disk space for the + * new btree and setting up all the accounting information we'll need + * to root the new btree while it's under construction and before we + * attach it to the realtime rmapbt inode. + */ + error = xrep_newbt_init_metadir_inode(&rr->new_btree, sc); + if (error) + return error; + + rr->new_btree.bload.get_records = xrep_rtrmap_get_records; + rr->new_btree.bload.claim_block = xrep_rtrmap_claim_block; + rr->new_btree.bload.iroot_size = xrep_rtrmap_iroot_size; + + rmap_cur = xfs_rtrmapbt_init_cursor(NULL, rtg); + xfs_btree_stage_ifakeroot(rmap_cur, &rr->new_btree.ifake); + + /* Compute how many blocks we'll need for the rmaps collected. */ + error = xfs_btree_bload_compute_geometry(rmap_cur, + &rr->new_btree.bload, rr->nr_records); + if (error) + goto err_cur; + + /* Last chance to abort before we start committing fixes. */ + if (xchk_should_terminate(sc, &error)) + goto err_cur; + + /* + * Guess how many blocks we're going to need to rebuild an entire + * rtrmapbt from the number of extents we found, and pump up our + * transaction to have sufficient block reservation. We're allowed + * to exceed quota to repair inconsistent metadata, though this is + * unlikely. + */ + error = xfs_trans_reserve_more_inode(sc->tp, rtg_rmap(rtg), + rr->new_btree.bload.nr_blocks, 0, true); + if (error) + goto err_cur; + + /* Reserve the space we'll need for the new btree. */ + error = xrep_newbt_alloc_blocks(&rr->new_btree, + rr->new_btree.bload.nr_blocks); + if (error) + goto err_cur; + + /* + * Create a cursor to the in-memory btree so that we can bulk load the + * new btree. 
+ */ + rr->mcur = xfs_rtrmapbt_mem_cursor(sc->sr.rtg, NULL, &rr->rtrmap_btree); + error = xfs_btree_goto_left_edge(rr->mcur); + if (error) + goto err_mcur; + + /* Add all observed rmap records. */ + rr->new_btree.ifake.if_fork->if_format = XFS_DINODE_FMT_META_BTREE; + error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr); + if (error) + goto err_mcur; + + /* + * Install the new rtrmap btree in the inode. After this point the old + * btree is no longer accessible, the new tree is live, and we can + * delete the cursor. + */ + xfs_rtrmapbt_commit_staged_btree(rmap_cur, sc->tp); + xrep_inode_set_nblocks(rr->sc, rr->new_btree.ifake.if_blocks); + xfs_btree_del_cursor(rmap_cur, 0); + xfs_btree_del_cursor(rr->mcur, 0); + rr->mcur = NULL; + + /* + * Now that we've written the new btree to disk, we don't need to keep + * updating the in-memory btree. Abort the scan to stop live updates. + */ + xchk_iscan_abort(&rr->iscan); + + /* Dispose of any unused blocks and the accounting information. */ + error = xrep_newbt_commit(&rr->new_btree); + if (error) + return error; + + return xrep_roll_trans(sc); + +err_mcur: + xfs_btree_del_cursor(rr->mcur, error); +err_cur: + xfs_btree_del_cursor(rmap_cur, error); + xrep_newbt_cancel(&rr->new_btree); + return error; +} + +/* Reaping the old btree. */ + +/* Reap the old rtrmapbt blocks. */ +STATIC int +xrep_rtrmap_remove_old_tree( + struct xrep_rtrmap *rr) +{ + int error; + + /* + * Free all the extents that were allocated to the former rtrmapbt and + * aren't cross-linked with something else. + */ + error = xrep_reap_metadir_fsblocks(rr->sc, &rr->old_rtrmapbt_blocks); + if (error) + return error; + + /* + * Ensure the proper reservation for the rtrmap inode so that we don't + * fail to expand the new btree. + */ + return xrep_reset_metafile_resv(rr->sc); +} + +static inline bool +xrep_rtrmapbt_want_live_update( + struct xchk_iscan *iscan, + const struct xfs_owner_info *oi) +{ + if (xchk_iscan_aborted(iscan)) + return false; + + /* + * We scanned the CoW staging extents before we started the iscan, so + * we need all the updates. + */ + if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner)) + return true; + + /* Ignore updates to files that the scanner hasn't visited yet. */ + return xchk_iscan_want_live_update(iscan, oi->oi_owner); +} + +/* + * Apply a rtrmapbt update from the regular filesystem into our shadow btree. + * We're running from the thread that owns the rtrmap ILOCK and is generating + * the update, so we must be careful about which parts of the struct + * xrep_rtrmap that we change. 
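+ *
+ * Only the in-memory rtrmap btree and the iscan abort state are modified
+ * here, and only while holding rr->lock, so these updates cannot race with
+ * the scan thread's own updates to those fields.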
+ */ +static int +xrep_rtrmapbt_live_update( + struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct xfs_rmap_update_params *p = data; + struct xrep_rtrmap *rr; + struct xfs_mount *mp; + struct xfs_btree_cur *mcur; + struct xfs_trans *tp; + void *txcookie; + int error; + + rr = container_of(nb, struct xrep_rtrmap, rhook.rmap_hook.nb); + mp = rr->sc->mp; + + if (!xrep_rtrmapbt_want_live_update(&rr->iscan, &p->oinfo)) + goto out_unlock; + + trace_xrep_rmap_live_update(rtg_group(rr->sc->sr.rtg), action, p); + + error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp); + if (error) + goto out_abort; + + mutex_lock(&rr->lock); + mcur = xfs_rtrmapbt_mem_cursor(rr->sc->sr.rtg, tp, &rr->rtrmap_btree); + error = __xfs_rmap_finish_intent(mcur, action, p->startblock, + p->blockcount, &p->oinfo, p->unwritten); + xfs_btree_del_cursor(mcur, error); + if (error) + goto out_cancel; + + error = xfbtree_trans_commit(&rr->rtrmap_btree, tp); + if (error) + goto out_cancel; + + xrep_trans_cancel_hook_dummy(&txcookie, tp); + mutex_unlock(&rr->lock); + return NOTIFY_DONE; + +out_cancel: + xfbtree_trans_cancel(&rr->rtrmap_btree, tp); + xrep_trans_cancel_hook_dummy(&txcookie, tp); +out_abort: + xchk_iscan_abort(&rr->iscan); + mutex_unlock(&rr->lock); +out_unlock: + return NOTIFY_DONE; +} + +/* Set up the filesystem scan components. */ +STATIC int +xrep_rtrmap_setup_scan( + struct xrep_rtrmap *rr) +{ + struct xfs_scrub *sc = rr->sc; + int error; + + mutex_init(&rr->lock); + xfsb_bitmap_init(&rr->old_rtrmapbt_blocks); + + /* Set up some storage */ + error = xfs_rtrmapbt_mem_init(sc->mp, &rr->rtrmap_btree, sc->xmbtp, + rtg_rgno(sc->sr.rtg)); + if (error) + goto out_bitmap; + + /* Retry iget every tenth of a second for up to 30 seconds. */ + xchk_iscan_start(sc, 30000, 100, &rr->iscan); + + /* + * Hook into live rtrmap operations so that we can update our in-memory + * btree to reflect live changes on the filesystem. Since we drop the + * rtrmap ILOCK to scan all the inodes, we need this piece to avoid + * installing a stale btree. + */ + ASSERT(sc->flags & XCHK_FSGATES_RMAP); + xfs_rmap_hook_setup(&rr->rhook, xrep_rtrmapbt_live_update); + error = xfs_rmap_hook_add(rtg_group(sc->sr.rtg), &rr->rhook); + if (error) + goto out_iscan; + return 0; + +out_iscan: + xchk_iscan_teardown(&rr->iscan); + xfbtree_destroy(&rr->rtrmap_btree); +out_bitmap: + xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks); + mutex_destroy(&rr->lock); + return error; +} + +/* Tear down scan components. */ +STATIC void +xrep_rtrmap_teardown( + struct xrep_rtrmap *rr) +{ + struct xfs_scrub *sc = rr->sc; + + xchk_iscan_abort(&rr->iscan); + xfs_rmap_hook_del(rtg_group(sc->sr.rtg), &rr->rhook); + xchk_iscan_teardown(&rr->iscan); + xfbtree_destroy(&rr->rtrmap_btree); + xfsb_bitmap_destroy(&rr->old_rtrmapbt_blocks); + mutex_destroy(&rr->lock); +} + +/* Repair the realtime rmap btree. */ +int +xrep_rtrmapbt( + struct xfs_scrub *sc) +{ + struct xrep_rtrmap *rr = sc->buf; + int error; + + /* Make sure any problems with the fork are fixed. */ + error = xrep_metadata_inode_forks(sc); + if (error) + return error; + + error = xrep_rtrmap_setup_scan(rr); + if (error) + return error; + + /* Collect rmaps for realtime files. */ + error = xrep_rtrmap_find_rmaps(rr); + if (error) + goto out_records; + + xfs_trans_ijoin(sc->tp, sc->ip, 0); + + /* Rebuild the rtrmap information. */ + error = xrep_rtrmap_build_new_tree(rr); + if (error) + goto out_records; + + /* Kill the old tree. 
*/ + error = xrep_rtrmap_remove_old_tree(rr); + if (error) + goto out_records; + +out_records: + xrep_rtrmap_teardown(rr); + return error; +} diff --git a/fs/xfs/scrub/rtsummary.c b/fs/xfs/scrub/rtsummary.c index 49fc6250bafc..4ac679c1bd29 100644 --- a/fs/xfs/scrub/rtsummary.c +++ b/fs/xfs/scrub/rtsummary.c @@ -81,8 +81,7 @@ xchk_setup_rtsummary( if (error) return error; - error = xchk_install_live_inode(sc, - sc->sr.rtg->rtg_inodes[XFS_RTGI_SUMMARY]); + error = xchk_install_live_inode(sc, rtg_summary(sc->sr.rtg)); if (error) return error; @@ -90,6 +89,10 @@ xchk_setup_rtsummary( if (error) return error; + error = xchk_rtgroup_lock(sc, &sc->sr, XFS_RTGLOCK_BITMAP); + if (error) + return error; + /* * Now that we've locked the rtbitmap and rtsummary, we can't race with * growfsrt trying to expand the summary or change the size of the rt @@ -100,7 +103,6 @@ xchk_setup_rtsummary( * exclusively here. If we ever start caring about running concurrent * fsmap with scrub this could be changed. */ - xchk_rtgroup_lock(&sc->sr, XFS_RTGLOCK_BITMAP); if (mp->m_sb.sb_rblocks) { rts->rextents = xfs_blen_to_rtbxlen(mp, mp->m_sb.sb_rblocks); rts->rbmblocks = xfs_rtbitmap_blockcount(mp); @@ -191,8 +193,7 @@ xchk_rtsum_record_free( rtlen = xfs_rtxlen_to_extlen(mp, rec->ar_extcount); if (!xfs_verify_rtbext(mp, rtbno, rtlen)) { - xchk_ino_xref_set_corrupt(sc, - rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_ino); + xchk_ino_xref_set_corrupt(sc, rtg_bitmap(rtg)->i_ino); return -EFSCORRUPTED; } @@ -218,7 +219,7 @@ xchk_rtsum_compute( /* If the bitmap size doesn't match the computed size, bail. */ if (XFS_FSB_TO_B(mp, xfs_rtbitmap_blockcount(mp)) != - rtg->rtg_inodes[XFS_RTGI_BITMAP]->i_disk_size) + rtg_bitmap(rtg)->i_disk_size) return -EFSCORRUPTED; return xfs_rtalloc_query_all(rtg, sc->tp, xchk_rtsum_record_free, sc); @@ -310,8 +311,8 @@ xchk_rtsummary( { struct xfs_mount *mp = sc->mp; struct xfs_rtgroup *rtg = sc->sr.rtg; - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; - struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); + struct xfs_inode *rsumip = rtg_summary(rtg); struct xchk_rtsummary *rts = sc->buf; int error; diff --git a/fs/xfs/scrub/rtsummary_repair.c b/fs/xfs/scrub/rtsummary_repair.c index 8198ea84ad70..d593977d70df 100644 --- a/fs/xfs/scrub/rtsummary_repair.c +++ b/fs/xfs/scrub/rtsummary_repair.c @@ -165,7 +165,8 @@ xrep_rtsummary( * Now exchange the contents. Nothing in repair uses the temporary * buffer, so we can reuse it for the tempfile exchrange information. 
*/ - error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, &rts->tempexch); + error = xrep_tempexch_trans_reserve(sc, XFS_DATA_FORK, 0, + rts->rsumblocks, &rts->tempexch); if (error) return error; diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 950f5a58dcd9..7567dd5cad14 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c @@ -164,7 +164,7 @@ xchk_fsgates_disable( trace_xchk_fsgates_disable(sc, sc->flags & XCHK_FSGATES_ALL); if (sc->flags & XCHK_FSGATES_DRAIN) - xfs_drain_wait_disable(); + xfs_defer_drain_wait_disable(); if (sc->flags & XCHK_FSGATES_QUOTA) xfs_dqtrx_hook_disable(); @@ -218,6 +218,8 @@ xchk_teardown( int error) { xchk_ag_free(sc, &sc->sa); + xchk_rtgroup_btcur_free(&sc->sr); + if (sc->tp) { if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) error = xfs_trans_commit(sc->tp); @@ -458,6 +460,20 @@ static const struct xchk_meta_ops meta_scrub_ops[] = { .has = xfs_has_rtsb, .repair = xrep_rgsuperblock, }, + [XFS_SCRUB_TYPE_RTRMAPBT] = { /* realtime group rmapbt */ + .type = ST_RTGROUP, + .setup = xchk_setup_rtrmapbt, + .scrub = xchk_rtrmapbt, + .has = xfs_has_rtrmapbt, + .repair = xrep_rtrmapbt, + }, + [XFS_SCRUB_TYPE_RTREFCBT] = { /* realtime refcountbt */ + .type = ST_RTGROUP, + .setup = xchk_setup_rtrefcountbt, + .scrub = xchk_rtrefcountbt, + .has = xfs_has_rtreflink, + .repair = xrep_rtrefcountbt, + }, }; static int diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h index 5dbbe93cb49b..a3f1abc91390 100644 --- a/fs/xfs/scrub/scrub.h +++ b/fs/xfs/scrub/scrub.h @@ -96,7 +96,7 @@ struct xchk_meta_ops { int (*repair_eval)(struct xfs_scrub *sc); /* Decide if we even have this piece of metadata. */ - bool (*has)(struct xfs_mount *); + bool (*has)(const struct xfs_mount *); /* type describing required/allowed inputs */ enum xchk_type type; @@ -126,6 +126,10 @@ struct xchk_rt { /* XFS_RTGLOCK_* lock state if locked */ unsigned int rtlock_flags; + + /* rtgroup btrees */ + struct xfs_btree_cur *rmap_cur; + struct xfs_btree_cur *refc_cur; }; struct xfs_scrub { @@ -280,10 +284,14 @@ int xchk_metapath(struct xfs_scrub *sc); int xchk_rtbitmap(struct xfs_scrub *sc); int xchk_rtsummary(struct xfs_scrub *sc); int xchk_rgsuperblock(struct xfs_scrub *sc); +int xchk_rtrmapbt(struct xfs_scrub *sc); +int xchk_rtrefcountbt(struct xfs_scrub *sc); #else # define xchk_rtbitmap xchk_nothing # define xchk_rtsummary xchk_nothing # define xchk_rgsuperblock xchk_nothing +# define xchk_rtrmapbt xchk_nothing +# define xchk_rtrefcountbt xchk_nothing #endif #ifdef CONFIG_XFS_QUOTA int xchk_quota(struct xfs_scrub *sc); @@ -317,8 +325,26 @@ void xchk_xref_is_not_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno, #ifdef CONFIG_XFS_RT void xchk_xref_is_used_rt_space(struct xfs_scrub *sc, xfs_rtblock_t rtbno, xfs_extlen_t len); +void xchk_xref_has_no_rt_owner(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len); +void xchk_xref_has_rt_owner(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len); +void xchk_xref_is_only_rt_owned_by(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len, const struct xfs_owner_info *oinfo); +void xchk_xref_is_rt_cow_staging(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len); +void xchk_xref_is_not_rt_shared(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len); +void xchk_xref_is_not_rt_cow_staging(struct xfs_scrub *sc, xfs_rgblock_t rgbno, + xfs_extlen_t len); #else # define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0) +# define xchk_xref_has_no_rt_owner(sc, rtbno, len) do { } 
while (0) +# define xchk_xref_has_rt_owner(sc, rtbno, len) do { } while (0) +# define xchk_xref_is_only_rt_owned_by(sc, bno, len, oinfo) do { } while (0) +# define xchk_xref_is_rt_cow_staging(sc, bno, len) do { } while (0) +# define xchk_xref_is_not_rt_shared(sc, bno, len) do { } while (0) +# define xchk_xref_is_not_rt_cow_staging(sc, bno, len) do { } while (0) #endif #endif /* __XFS_SCRUB_SCRUB_H__ */ diff --git a/fs/xfs/scrub/stats.c b/fs/xfs/scrub/stats.c index a476c7b2ab75..f8a37ea97791 100644 --- a/fs/xfs/scrub/stats.c +++ b/fs/xfs/scrub/stats.c @@ -82,6 +82,8 @@ static const char *name_map[XFS_SCRUB_TYPE_NR] = { [XFS_SCRUB_TYPE_DIRTREE] = "dirtree", [XFS_SCRUB_TYPE_METAPATH] = "metapath", [XFS_SCRUB_TYPE_RGSUPER] = "rgsuper", + [XFS_SCRUB_TYPE_RTRMAPBT] = "rtrmapbt", + [XFS_SCRUB_TYPE_RTREFCBT] = "rtrefcountbt", }; /* Format the scrub stats into a text buffer, similar to pcp style. */ diff --git a/fs/xfs/scrub/tempexch.h b/fs/xfs/scrub/tempexch.h index 995ba187c5aa..eccda720c2ca 100644 --- a/fs/xfs/scrub/tempexch.h +++ b/fs/xfs/scrub/tempexch.h @@ -12,7 +12,7 @@ struct xrep_tempexch { }; int xrep_tempexch_trans_reserve(struct xfs_scrub *sc, int whichfork, - struct xrep_tempexch *ti); + xfs_fileoff_t off, xfs_filblks_t len, struct xrep_tempexch *ti); int xrep_tempexch_trans_alloc(struct xfs_scrub *sc, int whichfork, struct xrep_tempexch *ti); diff --git a/fs/xfs/scrub/tempfile.c b/fs/xfs/scrub/tempfile.c index 2d7ca7e1bbca..cf99e0ca51b0 100644 --- a/fs/xfs/scrub/tempfile.c +++ b/fs/xfs/scrub/tempfile.c @@ -606,6 +606,8 @@ STATIC int xrep_tempexch_prep_request( struct xfs_scrub *sc, int whichfork, + xfs_fileoff_t off, + xfs_filblks_t len, struct xrep_tempexch *tx) { struct xfs_exchmaps_req *req = &tx->req; @@ -629,18 +631,19 @@ xrep_tempexch_prep_request( /* Exchange all mappings in both forks. */ req->ip1 = sc->tempip; req->ip2 = sc->ip; - req->startoff1 = 0; - req->startoff2 = 0; + req->startoff1 = off; + req->startoff2 = off; switch (whichfork) { case XFS_ATTR_FORK: req->flags |= XFS_EXCHMAPS_ATTR_FORK; break; case XFS_DATA_FORK: - /* Always exchange sizes when exchanging data fork mappings. */ - req->flags |= XFS_EXCHMAPS_SET_SIZES; + /* Exchange sizes when exchanging all data fork mappings. */ + if (off == 0 && len == XFS_MAX_FILEOFF) + req->flags |= XFS_EXCHMAPS_SET_SIZES; break; } - req->blockcount = XFS_MAX_FILEOFF; + req->blockcount = len; return 0; } @@ -749,6 +752,7 @@ xrep_tempexch_reserve_quota( * or the two inodes have the same dquots. 
*/ if (!XFS_IS_QUOTA_ON(tp->t_mountp) || req->ip1 == req->ip2 || + xfs_is_metadir_inode(req->ip1) || (req->ip1->i_udquot == req->ip2->i_udquot && req->ip1->i_gdquot == req->ip2->i_gdquot && req->ip1->i_pdquot == req->ip2->i_pdquot)) @@ -795,6 +799,8 @@ int xrep_tempexch_trans_reserve( struct xfs_scrub *sc, int whichfork, + xfs_fileoff_t off, + xfs_filblks_t len, struct xrep_tempexch *tx) { int error; @@ -803,7 +809,7 @@ xrep_tempexch_trans_reserve( xfs_assert_ilocked(sc->ip, XFS_ILOCK_EXCL); xfs_assert_ilocked(sc->tempip, XFS_ILOCK_EXCL); - error = xrep_tempexch_prep_request(sc, whichfork, tx); + error = xrep_tempexch_prep_request(sc, whichfork, off, len, tx); if (error) return error; @@ -841,7 +847,8 @@ xrep_tempexch_trans_alloc( ASSERT(sc->tp == NULL); ASSERT(xfs_has_exchange_range(sc->mp)); - error = xrep_tempexch_prep_request(sc, whichfork, tx); + error = xrep_tempexch_prep_request(sc, whichfork, 0, XFS_MAX_FILEOFF, + tx); if (error) return error; diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c index 98f923ae664d..2450e214103f 100644 --- a/fs/xfs/scrub/trace.c +++ b/fs/xfs/scrub/trace.c @@ -21,6 +21,7 @@ #include "xfs_rmap.h" #include "xfs_parent.h" #include "xfs_metafile.h" +#include "xfs_rtgroup.h" #include "scrub/scrub.h" #include "scrub/xfile.h" #include "scrub/xfarray.h" diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index d2ae7e93acb0..d7c4ced47c15 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -17,6 +17,7 @@ #include "xfs_bit.h" #include "xfs_quota_defs.h" +struct xfs_rtgroup; struct xfs_scrub; struct xfile; struct xfarray; @@ -40,6 +41,9 @@ struct xchk_dirtree_outcomes; TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED); TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW); +TRACE_DEFINE_ENUM(XG_TYPE_AG); +TRACE_DEFINE_ENUM(XG_TYPE_RTG); + TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PROBE); TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SB); TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGF); @@ -72,6 +76,8 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_DIRTREE); TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BARRIER); TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_METAPATH); TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RGSUPER); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTRMAPBT); +TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTREFCBT); #define XFS_SCRUB_TYPE_STRINGS \ { XFS_SCRUB_TYPE_PROBE, "probe" }, \ @@ -105,7 +111,9 @@ TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RGSUPER); { XFS_SCRUB_TYPE_DIRTREE, "dirtree" }, \ { XFS_SCRUB_TYPE_BARRIER, "barrier" }, \ { XFS_SCRUB_TYPE_METAPATH, "metapath" }, \ - { XFS_SCRUB_TYPE_RGSUPER, "rgsuper" } + { XFS_SCRUB_TYPE_RGSUPER, "rgsuper" }, \ + { XFS_SCRUB_TYPE_RTRMAPBT, "rtrmapbt" }, \ + { XFS_SCRUB_TYPE_RTREFCBT, "rtrefcountbt" } #define XFS_SCRUB_FLAG_STRINGS \ { XFS_SCRUB_IFLAG_REPAIR, "repair" }, \ @@ -1956,32 +1964,36 @@ DEFINE_XCHK_METAPATH_EVENT(xchk_metapath_lookup); #if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) DECLARE_EVENT_CLASS(xrep_extent_class, - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len), - TP_ARGS(pag, agbno, len), + TP_ARGS(xg, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x", + TP_printk("dev %d:%d %sno 
0x%x %sbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len) ); #define DEFINE_REPAIR_EXTENT_EVENT(name) \ DEFINE_EVENT(xrep_extent_class, name, \ - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \ + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \ xfs_extlen_t len), \ - TP_ARGS(pag, agbno, len)) + TP_ARGS(xg, agbno, len)) DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_unmap_extent); DEFINE_REPAIR_EXTENT_EVENT(xreap_dispose_free_extent); DEFINE_REPAIR_EXTENT_EVENT(xreap_agextent_binval); @@ -1989,35 +2001,39 @@ DEFINE_REPAIR_EXTENT_EVENT(xreap_bmapi_binval); DEFINE_REPAIR_EXTENT_EVENT(xrep_agfl_insert); DECLARE_EVENT_CLASS(xrep_reap_find_class, - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len, bool crosslinked), - TP_ARGS(pag, agbno, len, crosslinked), + TP_ARGS(xg, agbno, len, crosslinked), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) __field(bool, crosslinked) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; __entry->crosslinked = crosslinked; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x crosslinked %d", + TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x crosslinked %d", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len, __entry->crosslinked ? 
1 : 0) ); #define DEFINE_REPAIR_REAP_FIND_EVENT(name) \ DEFINE_EVENT(xrep_reap_find_class, name, \ - TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, \ + TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \ xfs_extlen_t len, bool crosslinked), \ - TP_ARGS(pag, agbno, len, crosslinked)) + TP_ARGS(xg, agbno, len, crosslinked)) DEFINE_REPAIR_REAP_FIND_EVENT(xreap_agextent_select); DEFINE_REPAIR_REAP_FIND_EVENT(xreap_bmapi_select); @@ -2108,29 +2124,33 @@ TRACE_EVENT(xrep_ibt_found, ) TRACE_EVENT(xrep_refc_found, - TP_PROTO(const struct xfs_perag *pag, + TP_PROTO(const struct xfs_group *xg, const struct xfs_refcount_irec *rec), - TP_ARGS(pag, rec), + TP_ARGS(xg, rec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, domain) + __field(enum xfs_group_type, type) __field(xfs_agblock_t, startblock) __field(xfs_extlen_t, blockcount) __field(xfs_nlink_t, refcount) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->agno = xg->xg_gno; + __entry->type = xg->xg_type; __entry->domain = rec->rc_domain; __entry->startblock = rec->rc_startblock; __entry->blockcount = rec->rc_blockcount; __entry->refcount = rec->rc_refcount; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u", + TP_printk("dev %d:%d %sno 0x%x dom %s %sbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->startblock, __entry->blockcount, __entry->refcount) @@ -2282,6 +2302,32 @@ TRACE_EVENT(xrep_calc_ag_resblks_btsize, __entry->rmapbt_sz, __entry->refcbt_sz) ) + +#ifdef CONFIG_XFS_RT +TRACE_EVENT(xrep_calc_rtgroup_resblks_btsize, + TP_PROTO(struct xfs_mount *mp, xfs_rgnumber_t rgno, + xfs_rgblock_t usedlen, xfs_rgblock_t rmapbt_sz), + TP_ARGS(mp, rgno, usedlen, rmapbt_sz), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_rgnumber_t, rgno) + __field(xfs_rgblock_t, usedlen) + __field(xfs_rgblock_t, rmapbt_sz) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rgno = rgno; + __entry->usedlen = usedlen; + __entry->rmapbt_sz = rmapbt_sz; + ), + TP_printk("dev %d:%d rgno 0x%x usedlen %u rmapbt %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->rgno, + __entry->usedlen, + __entry->rmapbt_sz) +); +#endif /* CONFIG_XFS_RT */ + TRACE_EVENT(xrep_reset_counters, TP_PROTO(struct xfs_mount *mp, struct xchk_fscounters *fsc), TP_ARGS(mp, fsc), @@ -2680,11 +2726,12 @@ DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_update_inode); DEFINE_SCRUB_NLINKS_DIFF_EVENT(xrep_nlinks_unfixable_inode); TRACE_EVENT(xrep_rmap_live_update, - TP_PROTO(const struct xfs_perag *pag, unsigned int op, + TP_PROTO(const struct xfs_group *xg, unsigned int op, const struct xfs_rmap_update_params *p), - TP_ARGS(pag, op, p), + TP_ARGS(xg, op, p), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(unsigned int, op) __field(xfs_agblock_t, agbno) @@ -2694,8 +2741,9 @@ TRACE_EVENT(xrep_rmap_live_update, __field(unsigned int, flags) ), TP_fast_assign( - __entry->dev = pag_mount(pag)->m_super->s_dev; - __entry->agno = pag_agno(pag); + __entry->dev = xg->xg_mount->m_super->s_dev; + __entry->type = xg->xg_type; + __entry->agno = xg->xg_gno; __entry->op = op; __entry->agbno = p->startblock; 
__entry->len = p->blockcount; @@ -2704,10 +2752,12 @@ TRACE_EVENT(xrep_rmap_live_update, if (p->unwritten) __entry->flags |= XFS_RMAP_UNWRITTEN; ), - TP_printk("dev %d:%d agno 0x%x op %d agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", + TP_printk("dev %d:%d %sno 0x%x op %d %sbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->op, + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len, __entry->owner, @@ -3605,6 +3655,186 @@ DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_try_unlink); DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_unlink); DEFINE_XCHK_METAPATH_EVENT(xrep_metapath_link); +#ifdef CONFIG_XFS_RT +DECLARE_EVENT_CLASS(xrep_rtbitmap_class, + TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start, xfs_rtxnum_t end), + TP_ARGS(mp, start, end), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(dev_t, rtdev) + __field(xfs_rtxnum_t, start) + __field(xfs_rtxnum_t, end) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rtdev = mp->m_rtdev_targp->bt_dev; + __entry->start = start; + __entry->end = end; + ), + TP_printk("dev %d:%d rtdev %d:%d startrtx 0x%llx endrtx 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->start, + __entry->end) +); +#define DEFINE_REPAIR_RGBITMAP_EVENT(name) \ +DEFINE_EVENT(xrep_rtbitmap_class, name, \ + TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start, \ + xfs_rtxnum_t end), \ + TP_ARGS(mp, start, end)) +DEFINE_REPAIR_RGBITMAP_EVENT(xrep_rtbitmap_record_free); +DEFINE_REPAIR_RGBITMAP_EVENT(xrep_rtbitmap_record_free_bulk); + +TRACE_EVENT(xrep_rtbitmap_or, + TP_PROTO(struct xfs_mount *mp, unsigned long long wordoff, + xfs_rtword_t mask, xfs_rtword_t word), + TP_ARGS(mp, wordoff, mask, word), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(dev_t, rtdev) + __field(unsigned long long, wordoff) + __field(unsigned int, mask) + __field(unsigned int, word) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rtdev = mp->m_rtdev_targp->bt_dev; + __entry->wordoff = wordoff; + __entry->mask = mask; + __entry->word = word; + ), + TP_printk("dev %d:%d rtdev %d:%d wordoff 0x%llx mask 0x%x word 0x%x", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->wordoff, + __entry->mask, + __entry->word) +); + +TRACE_EVENT(xrep_rtbitmap_load, + TP_PROTO(struct xfs_rtgroup *rtg, xfs_fileoff_t rbmoff, + xfs_rtxnum_t rtx, xfs_rtxnum_t len), + TP_ARGS(rtg, rbmoff, rtx, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(dev_t, rtdev) + __field(xfs_rgnumber_t, rgno) + __field(xfs_fileoff_t, rbmoff) + __field(xfs_rtxnum_t, rtx) + __field(xfs_rtxnum_t, len) + ), + TP_fast_assign( + __entry->dev = rtg_mount(rtg)->m_super->s_dev; + __entry->rtdev = rtg_mount(rtg)->m_rtdev_targp->bt_dev; + __entry->rgno = rtg_rgno(rtg); + __entry->rbmoff = rbmoff; + __entry->rtx = rtx; + __entry->len = len; + ), + TP_printk("dev %d:%d rtdev %d:%d rgno 0x%x rbmoff 0x%llx rtx 0x%llx rtxcount 0x%llx", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->rgno, + __entry->rbmoff, + __entry->rtx, + __entry->len) +); + +TRACE_EVENT(xrep_rtbitmap_load_words, + TP_PROTO(struct xfs_mount *mp, xfs_fileoff_t rbmoff, + unsigned long long wordoff, unsigned int wordcnt), + TP_ARGS(mp, rbmoff, wordoff, wordcnt), + TP_STRUCT__entry( + __field(dev_t, dev) + 
__field(dev_t, rtdev) + __field(xfs_fileoff_t, rbmoff) + __field(unsigned long long, wordoff) + __field(unsigned int, wordcnt) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rtdev = mp->m_rtdev_targp->bt_dev; + __entry->rbmoff = rbmoff; + __entry->wordoff = wordoff; + __entry->wordcnt = wordcnt; + ), + TP_printk("dev %d:%d rtdev %d:%d rbmoff 0x%llx wordoff 0x%llx wordcnt 0x%x", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->rbmoff, + __entry->wordoff, + __entry->wordcnt) +); + +TRACE_EVENT(xrep_rtbitmap_load_word, + TP_PROTO(struct xfs_mount *mp, unsigned long long wordoff, + unsigned int bit, xfs_rtword_t ondisk_word, + xfs_rtword_t xfile_word, xfs_rtword_t word_mask), + TP_ARGS(mp, wordoff, bit, ondisk_word, xfile_word, word_mask), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(dev_t, rtdev) + __field(unsigned long long, wordoff) + __field(unsigned int, bit) + __field(xfs_rtword_t, ondisk_word) + __field(xfs_rtword_t, xfile_word) + __field(xfs_rtword_t, word_mask) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rtdev = mp->m_rtdev_targp->bt_dev; + __entry->wordoff = wordoff; + __entry->bit = bit; + __entry->ondisk_word = ondisk_word; + __entry->xfile_word = xfile_word; + __entry->word_mask = word_mask; + ), + TP_printk("dev %d:%d rtdev %d:%d wordoff 0x%llx bit %u ondisk 0x%x(0x%x) inmem 0x%x(0x%x) result 0x%x mask 0x%x", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->wordoff, + __entry->bit, + __entry->ondisk_word, + __entry->ondisk_word & __entry->word_mask, + __entry->xfile_word, + __entry->xfile_word & ~__entry->word_mask, + (__entry->xfile_word & ~__entry->word_mask) | + (__entry->ondisk_word & __entry->word_mask), + __entry->word_mask) +); + +TRACE_EVENT(xrep_rtrmap_found, + TP_PROTO(struct xfs_mount *mp, const struct xfs_rmap_irec *rec), + TP_ARGS(mp, rec), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(dev_t, rtdev) + __field(xfs_rgblock_t, rgbno) + __field(xfs_extlen_t, len) + __field(uint64_t, owner) + __field(uint64_t, offset) + __field(unsigned int, flags) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->rtdev = mp->m_rtdev_targp->bt_dev; + __entry->rgbno = rec->rm_startblock; + __entry->len = rec->rm_blockcount; + __entry->owner = rec->rm_owner; + __entry->offset = rec->rm_offset; + __entry->flags = rec->rm_flags; + ), + TP_printk("dev %d:%d rtdev %d:%d rgbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", + MAJOR(__entry->dev), MINOR(__entry->dev), + MAJOR(__entry->rtdev), MINOR(__entry->rtdev), + __entry->rgbno, + __entry->len, + __entry->owner, + __entry->offset, + __entry->flags) +); +#endif /* CONFIG_XFS_RT */ + #endif /* IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR) */ #endif /* _TRACE_XFS_SCRUB_TRACE_H */ diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 559a3a577097..67877c36ed11 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -131,7 +131,7 @@ xfs_end_ioend( error = xfs_iomap_write_unwritten(ip, offset, size, false); if (!error && xfs_ioend_is_append(ioend)) - error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size); + error = xfs_setfilesize(ip, offset, size); done: iomap_finish_ioends(ioend, error); memalloc_nofs_restore(nofs_flag); diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index 24fb12986a56..319004bf089f 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c @@ -305,11 +305,6 @@ xfs_attr3_root_inactive( 
XFS_FSB_TO_BB(mp, mp->m_attr_geo->fsbcount), 0, &bp); if (error) return error; - error = bp->b_error; - if (error) { - xfs_trans_brelse(*trans, bp); - return error; - } xfs_trans_binval(*trans, bp); /* remove from cache */ /* * Commit the invalidate and start the next transaction. diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index aa63b8efd782..15bb790359f8 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -22,6 +22,7 @@ #include "xfs_error.h" #include "xfs_ag.h" #include "xfs_buf_mem.h" +#include "xfs_notify_failure.h" struct kmem_cache *xfs_buf_cache; @@ -40,8 +41,7 @@ struct kmem_cache *xfs_buf_cache; * * xfs_buf_rele: * b_lock - * pag_buf_lock - * lru_lock + * lru_lock * * xfs_buftarg_drain_rele * lru_lock @@ -52,14 +52,8 @@ struct kmem_cache *xfs_buf_cache; * b_lock (trylock due to inversion) */ -static int __xfs_buf_submit(struct xfs_buf *bp, bool wait); - -static inline int -xfs_buf_submit( - struct xfs_buf *bp) -{ - return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC)); -} +static void xfs_buf_submit(struct xfs_buf *bp); +static int xfs_buf_iowait(struct xfs_buf *bp); static inline bool xfs_buf_is_uncached(struct xfs_buf *bp) { @@ -132,15 +126,6 @@ __xfs_buf_ioacct_dec( } } -static inline void -xfs_buf_ioacct_dec( - struct xfs_buf *bp) -{ - spin_lock(&bp->b_lock); - __xfs_buf_ioacct_dec(bp); - spin_unlock(&bp->b_lock); -} - /* * When we mark a buffer stale, we remove the buffer from the LRU and clear the * b_lru_ref count so that the buffer is freed immediately when the buffer @@ -176,9 +161,9 @@ xfs_buf_stale( atomic_set(&bp->b_lru_ref, 0); if (!(bp->b_state & XFS_BSTATE_DISPOSE) && (list_lru_del_obj(&bp->b_target->bt_lru, &bp->b_lru))) - atomic_dec(&bp->b_hold); + bp->b_hold--; - ASSERT(atomic_read(&bp->b_hold) >= 1); + ASSERT(bp->b_hold >= 1); spin_unlock(&bp->b_lock); } @@ -202,9 +187,6 @@ xfs_buf_get_maps( return 0; } -/* - * Frees b_pages if it was allocated. - */ static void xfs_buf_free_maps( struct xfs_buf *bp) @@ -237,23 +219,25 @@ _xfs_buf_alloc( */ flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD); - atomic_set(&bp->b_hold, 1); + /* + * A new buffer is held and locked by the owner. This ensures that the + * buffer is owned by the caller and racing RCU lookups right after + * inserting into the hash table are safe (and will have to wait for + * the unlock to do anything non-trivial). + */ + bp->b_hold = 1; + sema_init(&bp->b_sema, 0); /* held, no waiters */ + + spin_lock_init(&bp->b_lock); atomic_set(&bp->b_lru_ref, 1); init_completion(&bp->b_iowait); INIT_LIST_HEAD(&bp->b_lru); INIT_LIST_HEAD(&bp->b_list); INIT_LIST_HEAD(&bp->b_li_list); - sema_init(&bp->b_sema, 0); /* held, no waiters */ - spin_lock_init(&bp->b_lock); bp->b_target = target; bp->b_mount = target->bt_mount; bp->b_flags = flags; - /* - * Set length and io_length to the same value initially. - * I/O routines should use io_length, which will be the same in - * most cases but may be reset (e.g. XFS recovery). 
- */ error = xfs_buf_get_maps(bp, nmaps); if (error) { kmem_cache_free(xfs_buf_cache, bp); @@ -395,8 +379,8 @@ xfs_buf_alloc_pages( for (;;) { long last = filled; - filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count, - bp->b_pages); + filled = alloc_pages_bulk(gfp_mask, bp->b_page_count, + bp->b_pages); if (filled == bp->b_page_count) { XFS_STATS_INC(bp->b_mount, xb_page_found); break; @@ -519,7 +503,6 @@ int xfs_buf_cache_init( struct xfs_buf_cache *bch) { - spin_lock_init(&bch->bc_lock); return rhashtable_init(&bch->bc_hash, &xfs_buf_hash_params); } @@ -588,6 +571,20 @@ xfs_buf_find_lock( return 0; } +static bool +xfs_buf_try_hold( + struct xfs_buf *bp) +{ + spin_lock(&bp->b_lock); + if (bp->b_hold == 0) { + spin_unlock(&bp->b_lock); + return false; + } + bp->b_hold++; + spin_unlock(&bp->b_lock); + return true; +} + static inline int xfs_buf_lookup( struct xfs_buf_cache *bch, @@ -600,7 +597,7 @@ xfs_buf_lookup( rcu_read_lock(); bp = rhashtable_lookup(&bch->bc_hash, map, xfs_buf_hash_params); - if (!bp || !atomic_inc_not_zero(&bp->b_hold)) { + if (!bp || !xfs_buf_try_hold(bp)) { rcu_read_unlock(); return -ENOENT; } @@ -655,18 +652,20 @@ xfs_buf_find_insert( if (error) goto out_free_buf; - spin_lock(&bch->bc_lock); + /* The new buffer keeps the perag reference until it is freed. */ + new_bp->b_pag = pag; + + rcu_read_lock(); bp = rhashtable_lookup_get_insert_fast(&bch->bc_hash, &new_bp->b_rhash_head, xfs_buf_hash_params); if (IS_ERR(bp)) { + rcu_read_unlock(); error = PTR_ERR(bp); - spin_unlock(&bch->bc_lock); goto out_free_buf; } - if (bp) { + if (bp && xfs_buf_try_hold(bp)) { /* found an existing buffer */ - atomic_inc(&bp->b_hold); - spin_unlock(&bch->bc_lock); + rcu_read_unlock(); error = xfs_buf_find_lock(bp, flags); if (error) xfs_buf_rele(bp); @@ -674,10 +673,8 @@ xfs_buf_find_insert( *bpp = bp; goto out_free_buf; } + rcu_read_unlock(); - /* The new buffer keeps the perag reference until it is freed. */ - new_bp->b_pag = pag; - spin_unlock(&bch->bc_lock); *bpp = new_bp; return 0; @@ -806,7 +803,10 @@ _xfs_buf_read( bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE); bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); - return xfs_buf_submit(bp); + xfs_buf_submit(bp); + if (flags & XBF_ASYNC) + return 0; + return xfs_buf_iowait(bp); } /* @@ -982,8 +982,8 @@ xfs_buf_read_uncached( bp->b_ops = ops; xfs_buf_submit(bp); - if (bp->b_error) { - error = bp->b_error; + error = xfs_buf_iowait(bp); + if (error) { xfs_buf_relse(bp); return error; } @@ -1043,7 +1043,10 @@ xfs_buf_hold( struct xfs_buf *bp) { trace_xfs_buf_hold(bp, _RET_IP_); - atomic_inc(&bp->b_hold); + + spin_lock(&bp->b_lock); + bp->b_hold++; + spin_unlock(&bp->b_lock); } static void @@ -1051,10 +1054,15 @@ xfs_buf_rele_uncached( struct xfs_buf *bp) { ASSERT(list_empty(&bp->b_lru)); - if (atomic_dec_and_test(&bp->b_hold)) { - xfs_buf_ioacct_dec(bp); - xfs_buf_free(bp); + + spin_lock(&bp->b_lock); + if (--bp->b_hold) { + spin_unlock(&bp->b_lock); + return; } + __xfs_buf_ioacct_dec(bp); + spin_unlock(&bp->b_lock); + xfs_buf_free(bp); } static void @@ -1064,51 +1072,38 @@ xfs_buf_rele_cached( struct xfs_buftarg *btp = bp->b_target; struct xfs_perag *pag = bp->b_pag; struct xfs_buf_cache *bch = xfs_buftarg_buf_cache(btp, pag); - bool release; bool freebuf = false; trace_xfs_buf_rele(bp, _RET_IP_); - ASSERT(atomic_read(&bp->b_hold) > 0); - - /* - * We grab the b_lock here first to serialise racing xfs_buf_rele() - * calls. 
The pag_buf_lock being taken on the last reference only - * serialises against racing lookups in xfs_buf_find(). IOWs, the second - * to last reference we drop here is not serialised against the last - * reference until we take bp->b_lock. Hence if we don't grab b_lock - * first, the last "release" reference can win the race to the lock and - * free the buffer before the second-to-last reference is processed, - * leading to a use-after-free scenario. - */ spin_lock(&bp->b_lock); - release = atomic_dec_and_lock(&bp->b_hold, &bch->bc_lock); - if (!release) { + ASSERT(bp->b_hold >= 1); + if (bp->b_hold > 1) { /* * Drop the in-flight state if the buffer is already on the LRU * and it holds the only reference. This is racy because we * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT * ensures the decrement occurs only once per-buf. */ - if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) + if (--bp->b_hold == 1 && !list_empty(&bp->b_lru)) __xfs_buf_ioacct_dec(bp); goto out_unlock; } - /* the last reference has been dropped ... */ + /* we are asked to drop the last reference */ __xfs_buf_ioacct_dec(bp); if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { /* - * If the buffer is added to the LRU take a new reference to the + * If the buffer is added to the LRU, keep the reference to the * buffer for the LRU and clear the (now stale) dispose list - * state flag + * state flag, else drop the reference. */ - if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) { + if (list_lru_add_obj(&btp->bt_lru, &bp->b_lru)) bp->b_state &= ~XFS_BSTATE_DISPOSE; - atomic_inc(&bp->b_hold); - } - spin_unlock(&bch->bc_lock); + else + bp->b_hold--; } else { + bp->b_hold--; /* * most of the time buffers will already be removed from the * LRU, so optimise that case by checking for the @@ -1124,7 +1119,6 @@ xfs_buf_rele_cached( ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); rhashtable_remove_fast(&bch->bc_hash, &bp->b_rhash_head, xfs_buf_hash_params); - spin_unlock(&bch->bc_lock); if (pag) xfs_perag_put(pag); freebuf = true; @@ -1291,6 +1285,7 @@ xfs_buf_ioend_handle_error( { struct xfs_mount *mp = bp->b_mount; struct xfs_error_cfg *cfg; + struct xfs_log_item *lip; /* * If we've already shutdown the journal because of I/O errors, there's @@ -1338,12 +1333,11 @@ xfs_buf_ioend_handle_error( } /* Still considered a transient error. Caller will schedule retries. */ - if (bp->b_flags & _XBF_INODES) - xfs_buf_inode_io_fail(bp); - else if (bp->b_flags & _XBF_DQUOTS) - xfs_buf_dquot_io_fail(bp); - else - ASSERT(list_empty(&bp->b_li_list)); + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { + set_bit(XFS_LI_FAILED, &lip->li_flags); + clear_bit(XFS_LI_FLUSHING, &lip->li_flags); + } + xfs_buf_ioerror(bp, 0); xfs_buf_relse(bp); return true; @@ -1367,14 +1361,10 @@ xfs_buf_ioend( { trace_xfs_buf_iodone(bp, _RET_IP_); - /* - * Pull in IO completion errors now. We are guaranteed to be running - * single threaded, so we don't need the lock to read b_io_error. 
- */ - if (!bp->b_error && bp->b_io_error) - xfs_buf_ioerror(bp, bp->b_io_error); - if (bp->b_flags & XBF_READ) { + if (!bp->b_error && xfs_buf_is_vmapped(bp)) + invalidate_kernel_vmap_range(bp->b_addr, + xfs_buf_vmap_len(bp)); if (!bp->b_error && bp->b_ops) bp->b_ops->verify_read(bp); if (!bp->b_error) @@ -1401,11 +1391,8 @@ xfs_buf_ioend( if (bp->b_log_item) xfs_buf_item_done(bp); - if (bp->b_flags & _XBF_INODES) - xfs_buf_inode_iodone(bp); - else if (bp->b_flags & _XBF_DQUOTS) - xfs_buf_dquot_iodone(bp); - + if (bp->b_iodone) + bp->b_iodone(bp); } bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD | @@ -1485,7 +1472,8 @@ xfs_bwrite( bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_DONE); - error = xfs_buf_submit(bp); + xfs_buf_submit(bp); + error = xfs_buf_iowait(bp); if (error) xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR); return error; @@ -1495,188 +1483,79 @@ static void xfs_buf_bio_end_io( struct bio *bio) { - struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private; - - if (!bio->bi_status && - (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && - XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) - bio->bi_status = BLK_STS_IOERR; - - /* - * don't overwrite existing errors - otherwise we can lose errors on - * buffers that require multiple bios to complete. - */ - if (bio->bi_status) { - int error = blk_status_to_errno(bio->bi_status); - - cmpxchg(&bp->b_io_error, 0, error); - } - - if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) - invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); - - if (atomic_dec_and_test(&bp->b_io_remaining) == 1) - xfs_buf_ioend_async(bp); - bio_put(bio); -} - -static void -xfs_buf_ioapply_map( - struct xfs_buf *bp, - int map, - int *buf_offset, - int *count, - blk_opf_t op) -{ - int page_index; - unsigned int total_nr_pages = bp->b_page_count; - int nr_pages; - struct bio *bio; - sector_t sector = bp->b_maps[map].bm_bn; - int size; - int offset; - - /* skip the pages in the buffer before the start offset */ - page_index = 0; - offset = *buf_offset; - while (offset >= PAGE_SIZE) { - page_index++; - offset -= PAGE_SIZE; - } - - /* - * Limit the IO size to the length of the current vector, and update the - * remaining IO count for the next time around. - */ - size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); - *count -= size; - *buf_offset += size; - -next_chunk: - atomic_inc(&bp->b_io_remaining); - nr_pages = bio_max_segs(total_nr_pages); - - bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO); - bio->bi_iter.bi_sector = sector; - bio->bi_end_io = xfs_buf_bio_end_io; - bio->bi_private = bp; - - for (; size && nr_pages; nr_pages--, page_index++) { - int rbytes, nbytes = PAGE_SIZE - offset; - - if (nbytes > size) - nbytes = size; - - rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes, - offset); - if (rbytes < nbytes) - break; - - offset = 0; - sector += BTOBB(nbytes); - size -= nbytes; - total_nr_pages--; - } + struct xfs_buf *bp = bio->bi_private; - if (likely(bio->bi_iter.bi_size)) { - if (xfs_buf_is_vmapped(bp)) { - flush_kernel_vmap_range(bp->b_addr, - xfs_buf_vmap_len(bp)); - } - submit_bio(bio); - if (size) - goto next_chunk; - } else { - /* - * This is guaranteed not to be the last io reference count - * because the caller (xfs_buf_submit) holds a count itself. 
- */ - atomic_dec(&bp->b_io_remaining); + if (bio->bi_status) + xfs_buf_ioerror(bp, blk_status_to_errno(bio->bi_status)); + else if ((bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) && + XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR)) xfs_buf_ioerror(bp, -EIO); - bio_put(bio); - } + xfs_buf_ioend_async(bp); + bio_put(bio); } -STATIC void -_xfs_buf_ioapply( - struct xfs_buf *bp) +static inline blk_opf_t +xfs_buf_bio_op( + struct xfs_buf *bp) { - struct blk_plug plug; - blk_opf_t op; - int offset; - int size; - int i; - - /* - * Make sure we capture only current IO errors rather than stale errors - * left over from previous use of the buffer (e.g. failed readahead). - */ - bp->b_error = 0; + blk_opf_t op; if (bp->b_flags & XBF_WRITE) { op = REQ_OP_WRITE; - - /* - * Run the write verifier callback function if it exists. If - * this function fails it will mark the buffer with an error and - * the IO should not be dispatched. - */ - if (bp->b_ops) { - bp->b_ops->verify_write(bp); - if (bp->b_error) { - xfs_force_shutdown(bp->b_mount, - SHUTDOWN_CORRUPT_INCORE); - return; - } - } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { - struct xfs_mount *mp = bp->b_mount; - - /* - * non-crc filesystems don't attach verifiers during - * log recovery, so don't warn for such filesystems. - */ - if (xfs_has_crc(mp)) { - xfs_warn(mp, - "%s: no buf ops on daddr 0x%llx len %d", - __func__, xfs_buf_daddr(bp), - bp->b_length); - xfs_hex_dump(bp->b_addr, - XFS_CORRUPTION_DUMP_LEN); - dump_stack(); - } - } } else { op = REQ_OP_READ; if (bp->b_flags & XBF_READ_AHEAD) op |= REQ_RAHEAD; } - /* we only use the buffer cache for meta-data */ - op |= REQ_META; + return op | REQ_META; +} - /* in-memory targets are directly mapped, no IO required. */ - if (xfs_buftarg_is_mem(bp->b_target)) { - xfs_buf_ioend(bp); - return; +static void +xfs_buf_submit_bio( + struct xfs_buf *bp) +{ + unsigned int size = BBTOB(bp->b_length); + unsigned int map = 0, p; + struct blk_plug plug; + struct bio *bio; + + bio = bio_alloc(bp->b_target->bt_bdev, bp->b_page_count, + xfs_buf_bio_op(bp), GFP_NOIO); + bio->bi_private = bp; + bio->bi_end_io = xfs_buf_bio_end_io; + + if (bp->b_flags & _XBF_KMEM) { + __bio_add_page(bio, virt_to_page(bp->b_addr), size, + bp->b_offset); + } else { + for (p = 0; p < bp->b_page_count; p++) + __bio_add_page(bio, bp->b_pages[p], PAGE_SIZE, 0); + bio->bi_iter.bi_size = size; /* limit to the actual size used */ + + if (xfs_buf_is_vmapped(bp)) + flush_kernel_vmap_range(bp->b_addr, + xfs_buf_vmap_len(bp)); } /* - * Walk all the vectors issuing IO on them. Set up the initial offset - * into the buffer and the desired IO size before we start - - * _xfs_buf_ioapply_vec() will modify them appropriately for each - * subsequent call. + * If there is more than one map segment, split out a new bio for each + * map except of the last one. The last map is handled by the + * remainder of the original bio outside the loop. 
*/ - offset = bp->b_offset; - size = BBTOB(bp->b_length); blk_start_plug(&plug); - for (i = 0; i < bp->b_map_count; i++) { - xfs_buf_ioapply_map(bp, i, &offset, &size, op); - if (bp->b_error) - break; - if (size <= 0) - break; /* all done */ + for (map = 0; map < bp->b_map_count - 1; map++) { + struct bio *split; + + split = bio_split(bio, bp->b_maps[map].bm_len, GFP_NOFS, + &fs_bio_set); + split->bi_iter.bi_sector = bp->b_maps[map].bm_bn; + bio_chain(split, bio); + submit_bio(split); } + bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn; + submit_bio(bio); blk_finish_plug(&plug); } @@ -1697,18 +1576,45 @@ xfs_buf_iowait( } /* + * Run the write verifier callback function if it exists. If this fails, mark + * the buffer with an error and do not dispatch the I/O. + */ +static bool +xfs_buf_verify_write( + struct xfs_buf *bp) +{ + if (bp->b_ops) { + bp->b_ops->verify_write(bp); + if (bp->b_error) + return false; + } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) { + /* + * Non-crc filesystems don't attach verifiers during log + * recovery, so don't warn for such filesystems. + */ + if (xfs_has_crc(bp->b_mount)) { + xfs_warn(bp->b_mount, + "%s: no buf ops on daddr 0x%llx len %d", + __func__, xfs_buf_daddr(bp), + bp->b_length); + xfs_hex_dump(bp->b_addr, XFS_CORRUPTION_DUMP_LEN); + dump_stack(); + } + } + + return true; +} + +/* * Buffer I/O submission path, read or write. Asynchronous submission transfers * the buffer lock ownership and the current reference to the IO. It is not * safe to reference the buffer after a call to this function unless the caller * holds an additional reference itself. */ -static int -__xfs_buf_submit( - struct xfs_buf *bp, - bool wait) +static void +xfs_buf_submit( + struct xfs_buf *bp) { - int error = 0; - trace_xfs_buf_submit(bp, _RET_IP_); ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); @@ -1728,57 +1634,36 @@ __xfs_buf_submit( * state here rather than mount state to avoid corrupting the log tail * on shutdown. */ - if (bp->b_mount->m_log && - xlog_is_shutdown(bp->b_mount->m_log)) { + if (bp->b_mount->m_log && xlog_is_shutdown(bp->b_mount->m_log)) { xfs_buf_ioend_fail(bp); - return -EIO; + return; } - /* - * Grab a reference so the buffer does not go away underneath us. For - * async buffers, I/O completion drops the callers reference, which - * could occur before submission returns. - */ - xfs_buf_hold(bp); - if (bp->b_flags & XBF_WRITE) xfs_buf_wait_unpin(bp); - /* clear the internal error state to avoid spurious errors */ - bp->b_io_error = 0; - /* - * Set the count to 1 initially, this will stop an I/O completion - * callout which happens before we have started all the I/O from calling - * xfs_buf_ioend too early. + * Make sure we capture only current IO errors rather than stale errors + * left over from previous use of the buffer (e.g. failed readahead). */ - atomic_set(&bp->b_io_remaining, 1); + bp->b_error = 0; + if (bp->b_flags & XBF_ASYNC) xfs_buf_ioacct_inc(bp); - _xfs_buf_ioapply(bp); - /* - * If _xfs_buf_ioapply failed, we can get back here with only the IO - * reference we took above. If we drop it to zero, run completion so - * that we don't return to the caller with completion still pending. 
- */ - if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { - if (bp->b_error || !(bp->b_flags & XBF_ASYNC)) - xfs_buf_ioend(bp); - else - xfs_buf_ioend_async(bp); + if ((bp->b_flags & XBF_WRITE) && !xfs_buf_verify_write(bp)) { + xfs_force_shutdown(bp->b_mount, SHUTDOWN_CORRUPT_INCORE); + xfs_buf_ioend(bp); + return; } - if (wait) - error = xfs_buf_iowait(bp); + /* In-memory targets are directly mapped, no I/O required. */ + if (xfs_buftarg_is_mem(bp->b_target)) { + xfs_buf_ioend(bp); + return; + } - /* - * Release the hold that keeps the buffer referenced for the entire - * I/O. Note that if the buffer is async, it is not safe to reference - * after this release. - */ - xfs_buf_rele(bp); - return error; + xfs_buf_submit_bio(bp); } void * @@ -1863,13 +1748,14 @@ xfs_buftarg_drain_rele( struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru); struct list_head *dispose = arg; - if (atomic_read(&bp->b_hold) > 1) { + if (!spin_trylock(&bp->b_lock)) + return LRU_SKIP; + if (bp->b_hold > 1) { /* need to wait, so skip it this pass */ + spin_unlock(&bp->b_lock); trace_xfs_buf_drain_buftarg(bp, _RET_IP_); return LRU_SKIP; } - if (!spin_trylock(&bp->b_lock)) - return LRU_SKIP; /* * clear the LRU reference count so the buffer doesn't get @@ -2208,7 +2094,7 @@ xfs_buf_delwri_queue( */ bp->b_flags |= _XBF_DELWRI_Q; if (list_empty(&bp->b_list)) { - atomic_inc(&bp->b_hold); + xfs_buf_hold(bp); list_add_tail(&bp->b_list, list); } @@ -2266,72 +2152,26 @@ xfs_buf_cmp( return 0; } -/* - * Submit buffers for write. If wait_list is specified, the buffers are - * submitted using sync I/O and placed on the wait list such that the caller can - * iowait each buffer. Otherwise async I/O is used and the buffers are released - * at I/O completion time. In either case, buffers remain locked until I/O - * completes and the buffer is released from the queue. - */ -static int -xfs_buf_delwri_submit_buffers( - struct list_head *buffer_list, - struct list_head *wait_list) +static bool +xfs_buf_delwri_submit_prep( + struct xfs_buf *bp) { - struct xfs_buf *bp, *n; - int pinned = 0; - struct blk_plug plug; - - list_sort(NULL, buffer_list, xfs_buf_cmp); - - blk_start_plug(&plug); - list_for_each_entry_safe(bp, n, buffer_list, b_list) { - if (!wait_list) { - if (!xfs_buf_trylock(bp)) - continue; - if (xfs_buf_ispinned(bp)) { - xfs_buf_unlock(bp); - pinned++; - continue; - } - } else { - xfs_buf_lock(bp); - } - - /* - * Someone else might have written the buffer synchronously or - * marked it stale in the meantime. In that case only the - * _XBF_DELWRI_Q flag got cleared, and we have to drop the - * reference and remove it from the list here. - */ - if (!(bp->b_flags & _XBF_DELWRI_Q)) { - xfs_buf_list_del(bp); - xfs_buf_relse(bp); - continue; - } - - trace_xfs_buf_delwri_split(bp, _RET_IP_); - - /* - * If we have a wait list, each buffer (and associated delwri - * queue reference) transfers to it and is submitted - * synchronously. Otherwise, drop the buffer from the delwri - * queue and submit async. - */ - bp->b_flags &= ~_XBF_DELWRI_Q; - bp->b_flags |= XBF_WRITE; - if (wait_list) { - bp->b_flags &= ~XBF_ASYNC; - list_move_tail(&bp->b_list, wait_list); - } else { - bp->b_flags |= XBF_ASYNC; - xfs_buf_list_del(bp); - } - __xfs_buf_submit(bp, false); + /* + * Someone else might have written the buffer synchronously or marked it + * stale in the meantime. In that case only the _XBF_DELWRI_Q flag got + * cleared, and we have to drop the reference and remove it from the + * list here. 
+ */ + if (!(bp->b_flags & _XBF_DELWRI_Q)) { + xfs_buf_list_del(bp); + xfs_buf_relse(bp); + return false; } - blk_finish_plug(&plug); - return pinned; + trace_xfs_buf_delwri_split(bp, _RET_IP_); + bp->b_flags &= ~_XBF_DELWRI_Q; + bp->b_flags |= XBF_WRITE; + return true; } /* @@ -2354,7 +2194,30 @@ int xfs_buf_delwri_submit_nowait( struct list_head *buffer_list) { - return xfs_buf_delwri_submit_buffers(buffer_list, NULL); + struct xfs_buf *bp, *n; + int pinned = 0; + struct blk_plug plug; + + list_sort(NULL, buffer_list, xfs_buf_cmp); + + blk_start_plug(&plug); + list_for_each_entry_safe(bp, n, buffer_list, b_list) { + if (!xfs_buf_trylock(bp)) + continue; + if (xfs_buf_ispinned(bp)) { + xfs_buf_unlock(bp); + pinned++; + continue; + } + if (!xfs_buf_delwri_submit_prep(bp)) + continue; + bp->b_flags |= XBF_ASYNC; + xfs_buf_list_del(bp); + xfs_buf_submit(bp); + } + blk_finish_plug(&plug); + + return pinned; } /* @@ -2371,9 +2234,21 @@ xfs_buf_delwri_submit( { LIST_HEAD (wait_list); int error = 0, error2; - struct xfs_buf *bp; + struct xfs_buf *bp, *n; + struct blk_plug plug; - xfs_buf_delwri_submit_buffers(buffer_list, &wait_list); + list_sort(NULL, buffer_list, xfs_buf_cmp); + + blk_start_plug(&plug); + list_for_each_entry_safe(bp, n, buffer_list, b_list) { + xfs_buf_lock(bp); + if (!xfs_buf_delwri_submit_prep(bp)) + continue; + bp->b_flags &= ~XBF_ASYNC; + list_move_tail(&bp->b_list, &wait_list); + xfs_buf_submit(bp); + } + blk_finish_plug(&plug); /* Wait for IO to complete. */ while (!list_empty(&wait_list)) { @@ -2398,14 +2273,9 @@ xfs_buf_delwri_submit( * Push a single buffer on a delwri queue. * * The purpose of this function is to submit a single buffer of a delwri queue - * and return with the buffer still on the original queue. The waiting delwri - * buffer submission infrastructure guarantees transfer of the delwri queue - * buffer reference to a temporary wait list. We reuse this infrastructure to - * transfer the buffer back to the original queue. + * and return with the buffer still on the original queue. * - * Note the buffer transitions from the queued state, to the submitted and wait - * listed state and back to the queued state during this call. The buffer - * locking and queue management logic between _delwri_pushbuf() and + * The buffer locking and queue management logic between _delwri_pushbuf() and * _delwri_queue() guarantee that the buffer cannot be queued to another list * before returning. */ @@ -2414,33 +2284,21 @@ xfs_buf_delwri_pushbuf( struct xfs_buf *bp, struct list_head *buffer_list) { - LIST_HEAD (submit_list); int error; ASSERT(bp->b_flags & _XBF_DELWRI_Q); trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_); - /* - * Isolate the buffer to a new local list so we can submit it for I/O - * independently from the rest of the original list. - */ xfs_buf_lock(bp); - list_move(&bp->b_list, &submit_list); - xfs_buf_unlock(bp); - - /* - * Delwri submission clears the DELWRI_Q buffer flag and returns with - * the buffer on the wait list with the original reference. Rather than - * bounce the buffer from a local wait list back to the original list - * after I/O completion, reuse the original list as the wait list. - */ - xfs_buf_delwri_submit_buffers(&submit_list, buffer_list); + bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC); + bp->b_flags |= XBF_WRITE; + xfs_buf_submit(bp); /* - * The buffer is now locked, under I/O and wait listed on the original - * delwri queue. 
Wait for I/O completion, restore the DELWRI_Q flag and - * return with the buffer unlocked and on the original queue. + * The buffer is now locked, under I/O but still on the original delwri + * queue. Wait for I/O completion, restore the DELWRI_Q flag and + * return with the buffer unlocked and still on the original queue. */ error = xfs_buf_iowait(bp); bp->b_flags |= _XBF_DELWRI_Q; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 3d56bc7a35cc..3b4ed42e11c0 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -34,8 +34,6 @@ struct xfs_buf; #define XBF_WRITE_FAIL (1u << 7) /* async writes have failed on this buffer */ /* buffer type flags for write callbacks */ -#define _XBF_INODES (1u << 16)/* inode buffer */ -#define _XBF_DQUOTS (1u << 17)/* dquot buffer */ #define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */ /* flags used only internally */ @@ -65,8 +63,6 @@ typedef unsigned int xfs_buf_flags_t; { XBF_DONE, "DONE" }, \ { XBF_STALE, "STALE" }, \ { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ - { _XBF_INODES, "INODES" }, \ - { _XBF_DQUOTS, "DQUOTS" }, \ { _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \ { _XBF_PAGES, "PAGES" }, \ { _XBF_KMEM, "KMEM" }, \ @@ -84,7 +80,6 @@ typedef unsigned int xfs_buf_flags_t; #define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */ struct xfs_buf_cache { - spinlock_t bc_lock; struct rhashtable bc_hash; }; @@ -172,7 +167,7 @@ struct xfs_buf { xfs_daddr_t b_rhash_key; /* buffer cache index */ int b_length; /* size of buffer in BBs */ - atomic_t b_hold; /* reference count */ + unsigned int b_hold; /* reference count */ atomic_t b_lru_ref; /* lru reclaim ref count */ xfs_buf_flags_t b_flags; /* status flags */ struct semaphore b_sema; /* semaphore for lockables */ @@ -184,10 +179,9 @@ struct xfs_buf { struct list_head b_lru; /* lru list */ spinlock_t b_lock; /* internal state lock */ unsigned int b_state; /* internal state flags */ - int b_io_error; /* internal IO error state */ wait_queue_head_t b_waiters; /* unpin waiters */ struct list_head b_list; - struct xfs_perag *b_pag; /* contains rbtree root */ + struct xfs_perag *b_pag; struct xfs_mount *b_mount; struct xfs_buftarg *b_target; /* buffer target (device) */ void *b_addr; /* virtual address of buffer */ @@ -202,11 +196,11 @@ struct xfs_buf { struct xfs_buf_map __b_map; /* inline compound buffer map */ int b_map_count; atomic_t b_pin_count; /* pin count */ - atomic_t b_io_remaining; /* #outstanding I/O requests */ unsigned int b_page_count; /* size of page array */ unsigned int b_offset; /* page offset of b_addr, only for _XBF_KMEM buffers */ int b_error; /* error code on I/O */ + void (*b_iodone)(struct xfs_buf *bp); /* * async write failure retry count. 
Initialised to zero on the first diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index 4d8a6aece995..8cde85259a58 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -54,17 +54,12 @@ bool xfs_buf_item_put(struct xfs_buf_log_item *); void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint); bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *); void xfs_buf_inode_iodone(struct xfs_buf *); -void xfs_buf_inode_io_fail(struct xfs_buf *bp); #ifdef CONFIG_XFS_QUOTA void xfs_buf_dquot_iodone(struct xfs_buf *); -void xfs_buf_dquot_io_fail(struct xfs_buf *bp); #else static inline void xfs_buf_dquot_iodone(struct xfs_buf *bp) { } -static inline void xfs_buf_dquot_io_fail(struct xfs_buf *bp) -{ -} #endif /* CONFIG_XFS_QUOTA */ void xfs_buf_iodone(struct xfs_buf *); bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec); diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c index 3d0c6402cb36..05a2f6927c12 100644 --- a/fs/xfs/xfs_buf_item_recover.c +++ b/fs/xfs/xfs_buf_item_recover.c @@ -262,12 +262,18 @@ xlog_recover_validate_buf_type( case XFS_BMAP_MAGIC: bp->b_ops = &xfs_bmbt_buf_ops; break; + case XFS_RTRMAP_CRC_MAGIC: + bp->b_ops = &xfs_rtrmapbt_buf_ops; + break; case XFS_RMAP_CRC_MAGIC: bp->b_ops = &xfs_rmapbt_buf_ops; break; case XFS_REFC_CRC_MAGIC: bp->b_ops = &xfs_refcountbt_buf_ops; break; + case XFS_RTREFC_CRC_MAGIC: + bp->b_ops = &xfs_rtrefcountbt_buf_ops; + break; default: warnmsg = "Bad btree block magic!"; break; @@ -855,6 +861,8 @@ xlog_recover_get_buf_lsn( uuid = &btb->bb_u.s.bb_uuid; break; } + case XFS_RTRMAP_CRC_MAGIC: + case XFS_RTREFC_CRC_MAGIC: case XFS_BMAP_CRC_MAGIC: case XFS_BMAP_MAGIC: { struct xfs_btree_block *btb = blk; @@ -1079,7 +1087,7 @@ xlog_recover_buf_commit_pass2( error = xlog_recover_do_primary_sb_buffer(mp, item, bp, buf_f, current_lsn); if (error) - goto out_release; + goto out_writebuf; /* Update the rt superblock if we have one. */ if (xfs_has_rtsb(mp) && mp->m_rtsb_bp) { @@ -1097,6 +1105,15 @@ xlog_recover_buf_commit_pass2( } /* + * Buffer held by buf log item during 'normal' buffer recovery must + * be committed through buffer I/O submission path to ensure proper + * release. When error occurs during sb buffer recovery, log shutdown + * will be done before submitting buffer list so that buffers can be + * released correctly through ioend failure path. + */ +out_writebuf: + + /* * Perform delayed write on the buffer. Asynchronous writes will be * slower when taking into account all the buffers to be flushed. * diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c index c4bd145f5ec1..3f2403a7b49c 100644 --- a/fs/xfs/xfs_discard.c +++ b/fs/xfs/xfs_discard.c @@ -90,7 +90,7 @@ xfs_discard_endio_work( /* * Queue up the actual completion to a thread to avoid IRQ-safe locking for - * pagb_lock. + * eb_lock. */ static void xfs_discard_endio( diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 201c26322ede..edbc521870a1 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -1230,18 +1230,6 @@ xfs_buf_dquot_iodone( } } -void -xfs_buf_dquot_io_fail( - struct xfs_buf *bp) -{ - struct xfs_log_item *lip; - - spin_lock(&bp->b_mount->m_ail->ail_lock); - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) - set_bit(XFS_LI_FAILED, &lip->li_flags); - spin_unlock(&bp->b_mount->m_ail->ail_lock); -} - /* Check incore dquot for errors before we flush. 
*/ static xfs_failaddr_t xfs_qm_dqflush_check( @@ -1316,7 +1304,8 @@ out_abort: /* * Attach a dquot buffer to this dquot to avoid allocating a buffer during a - * dqflush, since dqflush can be called from reclaim context. + * dqflush, since dqflush can be called from reclaim context. Caller must hold + * the dqlock. */ int xfs_dquot_attach_buf( @@ -1337,13 +1326,16 @@ xfs_dquot_attach_buf( return error; /* - * Attach the dquot to the buffer so that the AIL does not have - * to read the dquot buffer to push this item. + * Hold the dquot buffer so that we retain our ref to it after + * detaching it from the transaction, then give that ref to the + * dquot log item so that the AIL does not have to read the + * dquot buffer to push this item. */ xfs_buf_hold(bp); + xfs_trans_brelse(tp, bp); + spin_lock(&qlip->qli_lock); lip->li_buf = bp; - xfs_trans_brelse(tp, bp); } qlip->qli_dirty = true; spin_unlock(&qlip->qli_lock); @@ -1459,7 +1451,7 @@ xfs_qm_dqflush( * Attach the dquot to the buffer so that we can remove this dquot from * the AIL and release the flush lock once the dquot is synced to disk. */ - bp->b_flags |= _XBF_DQUOTS; + bp->b_iodone = xfs_buf_dquot_iodone; list_add_tail(&lip->li_bio_list, &bp->b_li_list); /* diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index c617bac75361..61217adf5ba5 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -160,6 +160,9 @@ static inline struct xfs_dquot *xfs_inode_dquot( struct xfs_inode *ip, xfs_dqtype_t type) { + if (xfs_is_metadir_inode(ip)) + return NULL; + switch (type) { case XFS_DQTYPE_USER: return ip->i_udquot; diff --git a/fs/xfs/xfs_drain.c b/fs/xfs/xfs_drain.c index 5ede81fadbd8..fa5f31931efd 100644 --- a/fs/xfs/xfs_drain.c +++ b/fs/xfs/xfs_drain.c @@ -13,28 +13,28 @@ #include "xfs_trace.h" /* - * Use a static key here to reduce the overhead of xfs_drain_rele. If the - * compiler supports jump labels, the static branch will be replaced by a nop - * sled when there are no xfs_drain_wait callers. Online fsck is currently - * the only caller, so this is a reasonable tradeoff. + * Use a static key here to reduce the overhead of xfs_defer_drain_rele. If + * the compiler supports jump labels, the static branch will be replaced by a + * nop sled when there are no xfs_defer_drain_wait callers. Online fsck is + * currently the only caller, so this is a reasonable tradeoff. * * Note: Patching the kernel code requires taking the cpu hotplug lock. Other * parts of the kernel allocate memory with that lock held, which means that * XFS callers cannot hold any locks that might be used by memory reclaim or * writeback when calling the static_branch_{inc,dec} functions. 
*/ -static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate); +static DEFINE_STATIC_KEY_FALSE(xfs_defer_drain_waiter_gate); void -xfs_drain_wait_disable(void) +xfs_defer_drain_wait_disable(void) { - static_branch_dec(&xfs_drain_waiter_gate); + static_branch_dec(&xfs_defer_drain_waiter_gate); } void -xfs_drain_wait_enable(void) +xfs_defer_drain_wait_enable(void) { - static_branch_inc(&xfs_drain_waiter_gate); + static_branch_inc(&xfs_defer_drain_waiter_gate); } void @@ -71,7 +71,7 @@ static inline bool has_waiters(struct wait_queue_head *wq_head) static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr) { if (atomic_dec_and_test(&dr->dr_count) && - static_branch_unlikely(&xfs_drain_waiter_gate) && + static_branch_unlikely(&xfs_defer_drain_waiter_gate) && has_waiters(&dr->dr_waiters)) wake_up(&dr->dr_waiters); } diff --git a/fs/xfs/xfs_drain.h b/fs/xfs/xfs_drain.h index efcf88df9a5e..4d446dbf65e5 100644 --- a/fs/xfs/xfs_drain.h +++ b/fs/xfs/xfs_drain.h @@ -26,8 +26,8 @@ struct xfs_defer_drain { void xfs_defer_drain_init(struct xfs_defer_drain *dr); void xfs_defer_drain_free(struct xfs_defer_drain *dr); -void xfs_drain_wait_disable(void); -void xfs_drain_wait_enable(void); +void xfs_defer_drain_wait_disable(void); +void xfs_defer_drain_wait_enable(void); /* * Deferred Work Intent Drains @@ -61,6 +61,9 @@ void xfs_drain_wait_enable(void); * All functions that create work items must increment the intent counter as * soon as the item is added to the transaction and cannot drop the counter * until the item is finished or cancelled. + * + * The same principles apply to realtime groups because the rt metadata inode + * ILOCKs are not held across transaction rolls. */ struct xfs_group *xfs_group_intent_get(struct xfs_mount *mp, xfs_fsblock_t fsbno, enum xfs_group_type type); diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c index 78cdc5064a8c..dbd87e137694 100644 --- a/fs/xfs/xfs_error.c +++ b/fs/xfs/xfs_error.c @@ -63,6 +63,7 @@ static unsigned int xfs_errortag_random_default[] = { XFS_RANDOM_WB_DELAY_MS, XFS_RANDOM_WRITE_DELAY_MS, XFS_RANDOM_EXCHMAPS_FINISH_ONE, + XFS_RANDOM_METAFILE_RESV_CRITICAL, }; struct xfs_errortag_attr { @@ -181,6 +182,7 @@ XFS_ERRORTAG_ATTR_RW(attr_leaf_to_node, XFS_ERRTAG_ATTR_LEAF_TO_NODE); XFS_ERRORTAG_ATTR_RW(wb_delay_ms, XFS_ERRTAG_WB_DELAY_MS); XFS_ERRORTAG_ATTR_RW(write_delay_ms, XFS_ERRTAG_WRITE_DELAY_MS); XFS_ERRORTAG_ATTR_RW(exchmaps_finish_one, XFS_ERRTAG_EXCHMAPS_FINISH_ONE); +XFS_ERRORTAG_ATTR_RW(metafile_resv_crit, XFS_ERRTAG_METAFILE_RESV_CRITICAL); static struct attribute *xfs_errortag_attrs[] = { XFS_ERRORTAG_ATTR_LIST(noerror), @@ -227,6 +229,7 @@ static struct attribute *xfs_errortag_attrs[] = { XFS_ERRORTAG_ATTR_LIST(wb_delay_ms), XFS_ERRORTAG_ATTR_LIST(write_delay_ms), XFS_ERRORTAG_ATTR_LIST(exchmaps_finish_one), + XFS_ERRORTAG_ATTR_LIST(metafile_resv_crit), NULL, }; ATTRIBUTE_GROUPS(xfs_errortag); diff --git a/fs/xfs/xfs_exchrange.c b/fs/xfs/xfs_exchrange.c index 265c42449893..0b41bdfecdfb 100644 --- a/fs/xfs/xfs_exchrange.c +++ b/fs/xfs/xfs_exchrange.c @@ -119,6 +119,9 @@ xfs_exchrange_reserve_quota( int ip1_error = 0; int error; + ASSERT(!xfs_is_metadir_inode(req->ip1)); + ASSERT(!xfs_is_metadir_inode(req->ip2)); + /* * Don't bother with a quota reservation if we're not enforcing them * or the two inodes have the same dquots. @@ -326,22 +329,6 @@ out_trans_cancel: * successfully but before locks are dropped. */ -/* Verify that we have security clearance to perform this operation. 
*/ -static int -xfs_exchange_range_verify_area( - struct xfs_exchrange *fxr) -{ - int ret; - - ret = remap_verify_area(fxr->file1, fxr->file1_offset, fxr->length, - true); - if (ret) - return ret; - - return remap_verify_area(fxr->file2, fxr->file2_offset, fxr->length, - true); -} - /* * Performs necessary checks before doing a range exchange, having stabilized * mutable inode attributes via i_rwsem. @@ -352,11 +339,13 @@ xfs_exchange_range_checks( unsigned int alloc_unit) { struct inode *inode1 = file_inode(fxr->file1); + loff_t size1 = i_size_read(inode1); struct inode *inode2 = file_inode(fxr->file2); + loff_t size2 = i_size_read(inode2); uint64_t allocmask = alloc_unit - 1; int64_t test_len; uint64_t blen; - loff_t size1, size2, tmp; + loff_t tmp; int error; /* Don't touch certain kinds of inodes */ @@ -365,24 +354,25 @@ xfs_exchange_range_checks( if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2)) return -ETXTBSY; - size1 = i_size_read(inode1); - size2 = i_size_read(inode2); - /* Ranges cannot start after EOF. */ if (fxr->file1_offset > size1 || fxr->file2_offset > size2) return -EINVAL; - /* - * If the caller said to exchange to EOF, we set the length of the - * request large enough to cover everything to the end of both files. - */ if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) { + /* + * If the caller said to exchange to EOF, we set the length of + * the request large enough to cover everything to the end of + * both files. + */ fxr->length = max_t(int64_t, size1 - fxr->file1_offset, size2 - fxr->file2_offset); - - error = xfs_exchange_range_verify_area(fxr); - if (error) - return error; + } else { + /* + * Otherwise we require both ranges to end within EOF. + */ + if (fxr->file1_offset + fxr->length > size1 || + fxr->file2_offset + fxr->length > size2) + return -EINVAL; } /* @@ -399,15 +389,6 @@ xfs_exchange_range_checks( return -EINVAL; /* - * We require both ranges to end within EOF, unless we're exchanging - * to EOF. - */ - if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) && - (fxr->file1_offset + fxr->length > size1 || - fxr->file2_offset + fxr->length > size2)) - return -EINVAL; - - /* * Make sure we don't hit any file size limits. If we hit any size * limits such that test_length was adjusted, we abort the whole * operation. @@ -744,6 +725,7 @@ xfs_exchange_range( { struct inode *inode1 = file_inode(fxr->file1); struct inode *inode2 = file_inode(fxr->file2); + loff_t check_len = fxr->length; int ret; BUILD_BUG_ON(XFS_EXCHANGE_RANGE_ALL_FLAGS & @@ -776,14 +758,18 @@ xfs_exchange_range( return -EBADF; /* - * If we're not exchanging to EOF, we can check the areas before - * stabilizing both files' i_size. + * If we're exchanging to EOF we can't calculate the length until taking + * the iolock. Pass a 0 length to remap_verify_area similar to the + * FICLONE and FICLONERANGE ioctls that support cloning to EOF as well. */ - if (!(fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF)) { - ret = xfs_exchange_range_verify_area(fxr); - if (ret) - return ret; - } + if (fxr->flags & XFS_EXCHANGE_RANGE_TO_EOF) + check_len = 0; + ret = remap_verify_area(fxr->file1, fxr->file1_offset, check_len, true); + if (ret) + return ret; + ret = remap_verify_area(fxr->file2, fxr->file2_offset, check_len, true); + if (ret) + return ret; /* Update cmtime if the fd/inode don't forbid it. 
*/ if (!(fxr->file1->f_mode & FMODE_NOCMTIME) && !IS_NOCMTIME(inode1)) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 9a435b1ff264..f7a7d89c345e 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1451,6 +1451,9 @@ xfs_dax_read_fault( trace_xfs_read_fault(ip, order); + ret = filemap_fsnotify_fault(vmf); + if (unlikely(ret)) + return ret; xfs_ilock(ip, XFS_MMAPLOCK_SHARED); ret = xfs_dax_fault_locked(vmf, order, false); xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); @@ -1479,6 +1482,16 @@ xfs_write_fault( vm_fault_t ret; trace_xfs_write_fault(ip, order); + /* + * Usually we get here from ->page_mkwrite callback but in case of DAX + * we will get here also for ordinary write fault. Handle HSM + * notifications for that case. + */ + if (IS_DAX(inode)) { + ret = filemap_fsnotify_fault(vmf); + if (unlikely(ret)) + return ret; + } sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 3290dd8524a6..1dbd2d75f7ae 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -26,6 +26,8 @@ #include "xfs_rtbitmap.h" #include "xfs_ag.h" #include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" /* Convert an xfs_fsmap to an fsmap. */ static void @@ -211,21 +213,20 @@ xfs_getfsmap_is_shared( struct xfs_mount *mp = tp->t_mountp; struct xfs_btree_cur *cur; xfs_agblock_t fbno; - xfs_extlen_t flen; + xfs_extlen_t flen = 0; int error; *stat = false; - if (!xfs_has_reflink(mp)) - return 0; - /* rt files will have no perag structure */ - if (!info->group) + if (!xfs_has_reflink(mp) || !info->group) return 0; - /* Are there any shared blocks here? */ - flen = 0; - cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, - to_perag(info->group)); + if (info->group->xg_type == XG_TYPE_RTG) + cur = xfs_rtrefcountbt_init_cursor(tp, to_rtg(info->group)); + else + cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, + to_perag(info->group)); + /* Are there any shared blocks here? */ error = xfs_refcount_find_shared(cur, frec->rec_key, XFS_BB_TO_FSBT(mp, frec->len_daddr), &fbno, &flen, false); @@ -832,6 +833,175 @@ xfs_getfsmap_rtdev_rtbitmap( return error; } + +/* Transform a realtime rmapbt record into a fsmap */ +STATIC int +xfs_getfsmap_rtdev_rmapbt_helper( + struct xfs_btree_cur *cur, + const struct xfs_rmap_irec *rec, + void *priv) +{ + struct xfs_fsmap_irec frec = { + .owner = rec->rm_owner, + .offset = rec->rm_offset, + .rm_flags = rec->rm_flags, + .rec_key = rec->rm_startblock, + }; + struct xfs_getfsmap_info *info = priv; + + return xfs_getfsmap_group_helper(info, cur->bc_tp, cur->bc_group, + rec->rm_startblock, rec->rm_blockcount, &frec); +} + +/* Actually query the rtrmap btree. */ +STATIC int +xfs_getfsmap_rtdev_rmapbt_query( + struct xfs_trans *tp, + struct xfs_getfsmap_info *info, + struct xfs_btree_cur **curpp) +{ + struct xfs_rtgroup *rtg = to_rtg(info->group); + + /* Query the rtrmapbt */ + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT); + *curpp = xfs_rtrmapbt_init_cursor(tp, rtg); + return xfs_rmap_query_range(*curpp, &info->low, &info->high, + xfs_getfsmap_rtdev_rmapbt_helper, info); +} + +/* Execute a getfsmap query against the realtime device rmapbt. 
*/ +STATIC int +xfs_getfsmap_rtdev_rmapbt( + struct xfs_trans *tp, + const struct xfs_fsmap *keys, + struct xfs_getfsmap_info *info) +{ + struct xfs_mount *mp = tp->t_mountp; + struct xfs_rtgroup *rtg = NULL; + struct xfs_btree_cur *bt_cur = NULL; + xfs_rtblock_t start_rtb; + xfs_rtblock_t end_rtb; + xfs_rgnumber_t start_rg, end_rg; + uint64_t eofs; + int error = 0; + + eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); + if (keys[0].fmr_physical >= eofs) + return 0; + start_rtb = xfs_daddr_to_rtb(mp, keys[0].fmr_physical); + end_rtb = xfs_daddr_to_rtb(mp, min(eofs - 1, keys[1].fmr_physical)); + + info->missing_owner = XFS_FMR_OWN_FREE; + + /* + * Convert the fsmap low/high keys to rtgroup based keys. Initialize + * low to the fsmap low key and max out the high key to the end + * of the rtgroup. + */ + info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset); + error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]); + if (error) + return error; + info->low.rm_blockcount = XFS_BB_TO_FSBT(mp, keys[0].fmr_length); + xfs_getfsmap_set_irec_flags(&info->low, &keys[0]); + + /* Adjust the low key if we are continuing from where we left off. */ + if (info->low.rm_blockcount == 0) { + /* No previous record from which to continue */ + } else if (rmap_not_shareable(mp, &info->low)) { + /* Last record seen was an unshareable extent */ + info->low.rm_owner = 0; + info->low.rm_offset = 0; + + start_rtb += info->low.rm_blockcount; + if (xfs_rtb_to_daddr(mp, start_rtb) >= eofs) + return 0; + } else { + /* Last record seen was a shareable file data extent */ + info->low.rm_offset += info->low.rm_blockcount; + } + info->low.rm_startblock = xfs_rtb_to_rgbno(mp, start_rtb); + + info->high.rm_startblock = -1U; + info->high.rm_owner = ULLONG_MAX; + info->high.rm_offset = ULLONG_MAX; + info->high.rm_blockcount = 0; + info->high.rm_flags = XFS_RMAP_KEY_FLAGS | XFS_RMAP_REC_FLAGS; + + start_rg = xfs_rtb_to_rgno(mp, start_rtb); + end_rg = xfs_rtb_to_rgno(mp, end_rtb); + + while ((rtg = xfs_rtgroup_next_range(mp, rtg, start_rg, end_rg))) { + /* + * Set the rtgroup high key from the fsmap high key if this + * is the last rtgroup that we're querying. + */ + info->group = rtg_group(rtg); + if (rtg_rgno(rtg) == end_rg) { + info->high.rm_startblock = + xfs_rtb_to_rgbno(mp, end_rtb); + info->high.rm_offset = + XFS_BB_TO_FSBT(mp, keys[1].fmr_offset); + error = xfs_fsmap_owner_to_rmap(&info->high, &keys[1]); + if (error) + break; + xfs_getfsmap_set_irec_flags(&info->high, &keys[1]); + } + + if (bt_cur) { + xfs_rtgroup_unlock(to_rtg(bt_cur->bc_group), + XFS_RTGLOCK_RMAP | + XFS_RTGLOCK_REFCOUNT); + xfs_btree_del_cursor(bt_cur, XFS_BTREE_NOERROR); + bt_cur = NULL; + } + + trace_xfs_fsmap_low_group_key(mp, info->dev, rtg_rgno(rtg), + &info->low); + trace_xfs_fsmap_high_group_key(mp, info->dev, rtg_rgno(rtg), + &info->high); + + error = xfs_getfsmap_rtdev_rmapbt_query(tp, info, &bt_cur); + if (error) + break; + + /* + * Set the rtgroup low key to the start of the rtgroup prior to + * moving on to the next rtgroup. + */ + if (rtg_rgno(rtg) == start_rg) + memset(&info->low, 0, sizeof(info->low)); + + /* + * If this is the last rtgroup, report any gap at the end of it + * before we drop the reference to the perag when the loop + * terminates. 
+ */ + if (rtg_rgno(rtg) == end_rg) { + info->last = true; + error = xfs_getfsmap_rtdev_rmapbt_helper(bt_cur, + &info->high, info); + if (error) + break; + } + info->group = NULL; + } + + if (bt_cur) { + xfs_rtgroup_unlock(to_rtg(bt_cur->bc_group), + XFS_RTGLOCK_RMAP | XFS_RTGLOCK_REFCOUNT); + xfs_btree_del_cursor(bt_cur, error < 0 ? XFS_BTREE_ERROR : + XFS_BTREE_NOERROR); + } + + /* loop termination case */ + if (rtg) { + info->group = NULL; + xfs_rtgroup_rele(rtg); + } + + return error; +} #endif /* CONFIG_XFS_RT */ /* Do we recognize the device? */ @@ -971,7 +1141,10 @@ xfs_getfsmap( if (mp->m_rtdev_targp) { handlers[2].nr_sectors = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); - handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; + if (use_rmap) + handlers[2].fn = xfs_getfsmap_rtdev_rmapbt; + else + handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; } #endif /* CONFIG_XFS_RT */ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 28dde215c899..455298503d01 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -21,6 +21,9 @@ #include "xfs_ag.h" #include "xfs_ag_resv.h" #include "xfs_trace.h" +#include "xfs_rtalloc.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" /* * Write new AG headers to disk. Non-transactional, but need to be @@ -113,6 +116,12 @@ xfs_growfs_data_private( xfs_buf_relse(bp); } + /* Make sure the new fs size won't cause problems with the log. */ + error = xfs_growfs_check_rtgeom(mp, nb, mp->m_sb.sb_rblocks, + mp->m_sb.sb_rextsize); + if (error) + return error; + nb_div = nb; nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks); if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS) @@ -220,7 +229,12 @@ xfs_growfs_data_private( error = xfs_fs_reserve_ag_blocks(mp); if (error == -ENOSPC) error = 0; + + /* Compute new maxlevels for rt btrees. */ + xfs_rtrmapbt_compute_maxlevels(mp); + xfs_rtrefcountbt_compute_maxlevels(mp); } + return error; out_trans_cancel: @@ -541,6 +555,19 @@ xfs_fs_reserve_ag_blocks( xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", error); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + return error; + } + + if (xfs_has_realtime(mp)) { + err2 = xfs_rt_resv_init(mp); + if (err2 && err2 != -ENOSPC) { + xfs_warn(mp, + "Error %d reserving realtime metadata reserve pool.", err2); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + } + + if (err2 && !error) + error = err2; } return error; @@ -555,6 +582,9 @@ xfs_fs_unreserve_ag_blocks( { struct xfs_perag *pag = NULL; + if (xfs_has_realtime(mp)) + xfs_rt_resv_free(mp); + while ((pag = xfs_perag_next(mp, pag))) xfs_ag_resv_free(pag); } diff --git a/fs/xfs/xfs_health.c b/fs/xfs/xfs_health.c index c7c2e6561998..7c541fb373d5 100644 --- a/fs/xfs/xfs_health.c +++ b/fs/xfs/xfs_health.c @@ -447,6 +447,8 @@ static const struct ioctl_sick_map rtgroup_map[] = { { XFS_SICK_RG_SUPER, XFS_RTGROUP_GEOM_SICK_SUPER }, { XFS_SICK_RG_BITMAP, XFS_RTGROUP_GEOM_SICK_BITMAP }, { XFS_SICK_RG_SUMMARY, XFS_RTGROUP_GEOM_SICK_SUMMARY }, + { XFS_SICK_RG_RMAPBT, XFS_RTGROUP_GEOM_SICK_RMAPBT }, + { XFS_SICK_RG_REFCNTBT, XFS_RTGROUP_GEOM_SICK_REFCNTBT }, }; /* Fill out rtgroup geometry health info. */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c8ad2606f928..b1f9f156ec88 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1404,8 +1404,11 @@ xfs_inactive( goto out; /* Try to clean out the cow blocks if there are any. 
*/ - if (xfs_inode_has_cow_data(ip)) - xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); + if (xfs_inode_has_cow_data(ip)) { + error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); + if (error) + goto out; + } if (VFS_I(ip)->i_nlink != 0) { /* @@ -2382,7 +2385,16 @@ xfs_iflush( __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); goto flush_out; } - if (S_ISREG(VFS_I(ip)->i_mode)) { + if (ip->i_df.if_format == XFS_DINODE_FMT_META_BTREE) { + if (!S_ISREG(VFS_I(ip)->i_mode) || + !(ip->i_diflags2 & XFS_DIFLAG2_METADATA)) { + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, + "%s: Bad %s meta btree inode %Lu, ptr "PTR_FMT, + __func__, xfs_metafile_type_str(ip->i_metatype), + ip->i_ino, ip); + goto flush_out; + } + } else if (S_ISREG(VFS_I(ip)->i_mode)) { if (XFS_TEST_ERROR( ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && ip->i_df.if_format != XFS_DINODE_FMT_BTREE, @@ -2422,6 +2434,14 @@ xfs_iflush( goto flush_out; } + if (xfs_inode_has_attr_fork(ip) && + ip->i_af.if_format == XFS_DINODE_FMT_META_BTREE) { + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, + "%s: meta btree in inode %Lu attr fork, ptr "PTR_FMT, + __func__, ip->i_ino, ip); + goto flush_out; + } + /* * Inode item log recovery for v2 inodes are dependent on the flushiter * count for correct sequencing. We bump the flush iteration count so diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 1648dc5a8068..c08093a65352 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -25,9 +25,19 @@ struct xfs_dquot; typedef struct xfs_inode { /* Inode linking and identification information. */ struct xfs_mount *i_mount; /* fs mount struct ptr */ - struct xfs_dquot *i_udquot; /* user dquot */ - struct xfs_dquot *i_gdquot; /* group dquot */ - struct xfs_dquot *i_pdquot; /* project dquot */ + union { + struct { + struct xfs_dquot *i_udquot; /* user dquot */ + struct xfs_dquot *i_gdquot; /* group dquot */ + struct xfs_dquot *i_pdquot; /* project dquot */ + }; + + /* + * Space that has been set aside to accomodate expansions of a + * metadata btree rooted in this file. + */ + uint64_t i_meta_resv_asked; + }; /* Inode location stuff */ xfs_ino_t i_ino; /* inode number (agno/agino)*/ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 912f0b1bc3cb..35803fcf0beb 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -157,6 +157,20 @@ xfs_inode_item_precommit( if (flags & XFS_ILOG_IVERSION) flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE); + /* + * Inode verifiers do not check that the CoW extent size hint is an + * integer multiple of the rt extent size on a directory with both + * rtinherit and cowextsize flags set. If we're logging a directory + * that is misconfigured in this way, clear the hint. 
+ */ + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) && + xfs_extlen_to_rtxmod(ip->i_mount, ip->i_cowextsize) > 0) { + ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE; + ip->i_cowextsize = 0; + flags |= XFS_ILOG_CORE; + } + if (!iip->ili_item.li_buf) { struct xfs_buf *bp; int error; @@ -185,7 +199,7 @@ xfs_inode_item_precommit( xfs_buf_hold(bp); spin_lock(&iip->ili_lock); iip->ili_item.li_buf = bp; - bp->b_flags |= _XBF_INODES; + bp->b_iodone = xfs_buf_inode_iodone; list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list); xfs_trans_brelse(tp, bp); } @@ -242,6 +256,7 @@ xfs_inode_item_data_fork_size( } break; case XFS_DINODE_FMT_BTREE: + case XFS_DINODE_FMT_META_BTREE: if ((iip->ili_fields & XFS_ILOG_DBROOT) && ip->i_df.if_broot_bytes > 0) { *nbytes += ip->i_df.if_broot_bytes; @@ -362,6 +377,7 @@ xfs_inode_item_format_data_fork( } break; case XFS_DINODE_FMT_BTREE: + case XFS_DINODE_FMT_META_BTREE: iip->ili_fields &= ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT | XFS_ILOG_DEV); @@ -1023,18 +1039,6 @@ xfs_buf_inode_iodone( list_splice_tail(&flushed_inodes, &bp->b_li_list); } -void -xfs_buf_inode_io_fail( - struct xfs_buf *bp) -{ - struct xfs_log_item *lip; - - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { - set_bit(XFS_LI_FAILED, &lip->li_flags); - clear_bit(XFS_LI_FLUSHING, &lip->li_flags); - } -} - /* * Clear the inode logging fields so no more flushes are attempted. If we are * on a buffer list, it is now safe to remove it because the buffer is diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index e70d2611456b..f3bfb814378c 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -22,6 +22,8 @@ #include "xfs_log_recover.h" #include "xfs_icache.h" #include "xfs_bmap_btree.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" STATIC void xlog_recover_inode_ra_pass2( @@ -266,6 +268,41 @@ xlog_dinode_verify_extent_counts( return 0; } +static inline int +xlog_recover_inode_dbroot( + struct xfs_mount *mp, + void *src, + unsigned int len, + struct xfs_dinode *dip) +{ + void *dfork = XFS_DFORK_DPTR(dip); + unsigned int dsize = XFS_DFORK_DSIZE(dip, mp); + + switch (dip->di_format) { + case XFS_DINODE_FMT_BTREE: + xfs_bmbt_to_bmdr(mp, src, len, dfork, dsize); + break; + case XFS_DINODE_FMT_META_BTREE: + switch (be16_to_cpu(dip->di_metatype)) { + case XFS_METAFILE_RTRMAP: + xfs_rtrmapbt_to_disk(mp, src, len, dfork, dsize); + return 0; + case XFS_METAFILE_RTREFCOUNT: + xfs_rtrefcountbt_to_disk(mp, src, len, dfork, dsize); + return 0; + default: + ASSERT(0); + return -EFSCORRUPTED; + } + break; + default: + ASSERT(0); + return -EFSCORRUPTED; + } + + return 0; +} + STATIC int xlog_recover_inode_commit_pass2( struct xlog *log, @@ -393,8 +430,9 @@ xlog_recover_inode_commit_pass2( if (unlikely(S_ISREG(ldip->di_mode))) { - if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) && - (ldip->di_format != XFS_DINODE_FMT_BTREE)) { + if (ldip->di_format != XFS_DINODE_FMT_EXTENTS && + ldip->di_format != XFS_DINODE_FMT_BTREE && + ldip->di_format != XFS_DINODE_FMT_META_BTREE) { XFS_CORRUPTION_ERROR( "Bad log dinode data fork format for regular file", XFS_ERRLEVEL_LOW, mp, ldip, sizeof(*ldip)); @@ -475,9 +513,9 @@ xlog_recover_inode_commit_pass2( break; case XFS_ILOG_DBROOT: - xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len, - (struct xfs_bmdr_block *)XFS_DFORK_DPTR(dip), - XFS_DFORK_DSIZE(dip, mp)); + error = xlog_recover_inode_dbroot(mp, src, len, dip); + if (error) + goto out_release; break; default: 
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 0789c18aaa18..ed85322507dd 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -469,8 +469,21 @@ xfs_fill_fsxattr( } } - if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) - fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize); + if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) { + /* + * Don't let a misaligned CoW extent size hint on a directory + * escape to userspace if it won't pass the setattr checks + * later. + */ + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + ip->i_cowextsize % mp->m_sb.sb_rextsize > 0) { + fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE; + fa->fsx_cowextsize = 0; + } else { + fa->fsx_cowextsize = XFS_FSB_TO_B(mp, ip->i_cowextsize); + } + } + fa->fsx_projid = ip->i_projid; if (ifp && !xfs_need_iread_extents(ifp)) fa->fsx_nextents = xfs_iext_count(ifp); @@ -541,10 +554,6 @@ xfs_ioctl_setattr_xflags( if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || xfs_extlen_to_rtxmod(mp, ip->i_extsize)) return -EINVAL; - - /* Clear reflink if we are actually able to set the rt flag. */ - if (xfs_is_reflink_inode(ip)) - ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; } /* diflags2 only valid for v3 inodes. */ @@ -1204,7 +1213,16 @@ xfs_file_ioctl( struct xfs_buftarg *target = xfs_inode_buftarg(ip); struct dioattr da; - da.d_mem = da.d_miniosz = target->bt_logical_sectorsize; + da.d_mem = target->bt_logical_sectorsize; + + /* + * See xfs_report_dioalign() for an explanation about why this + * reports a value larger than the sector size for COW inodes. + */ + if (xfs_is_cow_inode(ip)) + da.d_miniosz = xfs_inode_alloc_unitsize(ip); + else + da.d_miniosz = target->bt_logical_sectorsize; da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); if (copy_to_user(arg, &da, sizeof(da))) diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 50fa3ef89f6c..d61460309a78 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -976,10 +976,8 @@ xfs_dax_write_iomap_end( if (!xfs_is_cow_inode(ip)) return 0; - if (!written) { - xfs_reflink_cancel_cow_range(ip, pos, length, true); - return 0; - } + if (!written) + return xfs_reflink_cancel_cow_range(ip, pos, length, true); return xfs_reflink_end_cow(ip, pos, written); } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 207e0dadffc3..40289fe6f5b2 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -573,17 +573,43 @@ xfs_stat_blksize( } static void -xfs_get_atomic_write_attr( +xfs_report_dioalign( struct xfs_inode *ip, - unsigned int *unit_min, - unsigned int *unit_max) + struct kstat *stat) { - if (!xfs_inode_can_atomicwrite(ip)) { - *unit_min = *unit_max = 0; - return; - } + struct xfs_buftarg *target = xfs_inode_buftarg(ip); + struct block_device *bdev = target->bt_bdev; + + stat->result_mask |= STATX_DIOALIGN | STATX_DIO_READ_ALIGN; + stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; - *unit_min = *unit_max = ip->i_mount->m_sb.sb_blocksize; + /* + * For COW inodes, we can only perform out of place writes of entire + * allocation units (blocks or RT extents). + * For writes smaller than the allocation unit, we must fall back to + * buffered I/O to perform read-modify-write cycles. At best this is + * highly inefficient; at worst it leads to page cache invalidation + * races. Tell applications to avoid this by reporting the larger write + * alignment in dio_offset_align, and the smaller read alignment in + * dio_read_offset_align. 
+ */ + stat->dio_read_offset_align = bdev_logical_block_size(bdev); + if (xfs_is_cow_inode(ip)) + stat->dio_offset_align = xfs_inode_alloc_unitsize(ip); + else + stat->dio_offset_align = stat->dio_read_offset_align; +} + +static void +xfs_report_atomic_write( + struct xfs_inode *ip, + struct kstat *stat) +{ + unsigned int unit_min = 0, unit_max = 0; + + if (xfs_inode_can_atomicwrite(ip)) + unit_min = unit_max = ip->i_mount->m_sb.sb_blocksize; + generic_fill_statx_atomic_writes(stat, unit_min, unit_max); } STATIC int @@ -647,22 +673,10 @@ xfs_vn_getattr( stat->rdev = inode->i_rdev; break; case S_IFREG: - if (request_mask & STATX_DIOALIGN) { - struct xfs_buftarg *target = xfs_inode_buftarg(ip); - struct block_device *bdev = target->bt_bdev; - - stat->result_mask |= STATX_DIOALIGN; - stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; - stat->dio_offset_align = bdev_logical_block_size(bdev); - } - if (request_mask & STATX_WRITE_ATOMIC) { - unsigned int unit_min, unit_max; - - xfs_get_atomic_write_attr(ip, &unit_min, - &unit_max); - generic_fill_statx_atomic_writes(stat, - unit_min, unit_max); - } + if (request_mask & (STATX_DIOALIGN | STATX_DIO_READ_ALIGN)) + xfs_report_dioalign(ip, stat); + if (request_mask & STATX_WRITE_ATOMIC) + xfs_report_atomic_write(ip, stat); fallthrough; default: stat->blksize = xfs_stat_blksize(ip); diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 05daad8a8d34..f8851ff835de 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -2744,8 +2744,6 @@ xfs_log_ticket_regrant( if (!ticket->t_cnt) { xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res); trace_xfs_log_ticket_regrant_exit(log, ticket); - - ticket->t_curr_res = ticket->t_unit_res; } xfs_log_ticket_put(ticket); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index 0af3d477197b..b3c27dbccce8 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -1820,6 +1820,10 @@ static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = { &xlog_xmd_item_ops, &xlog_rtefi_item_ops, &xlog_rtefd_item_ops, + &xlog_rtrui_item_ops, + &xlog_rtrud_item_ops, + &xlog_rtcui_item_ops, + &xlog_rtcud_item_ops, }; static const struct xlog_recover_item_ops * diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 5918f433dba7..477c5262cf91 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -37,6 +37,8 @@ #include "xfs_rtbitmap.h" #include "xfs_metafile.h" #include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" +#include "xfs_rtrefcount_btree.h" #include "scrub/stats.h" static DEFINE_MUTEX(xfs_uuid_table_mutex); @@ -650,6 +652,15 @@ xfs_agbtree_compute_maxlevels( mp->m_agbtree_maxlevels = max(levels, mp->m_refc_maxlevels); } +/* Compute maximum possible height for realtime btree types for this fs. */ +static inline void +xfs_rtbtree_compute_maxlevels( + struct xfs_mount *mp) +{ + mp->m_rtbtree_maxlevels = max(mp->m_rtrmap_maxlevels, + mp->m_rtrefc_maxlevels); +} + /* * This function does the following on an initial mount of a file system: * - reads the superblock from disk and init the mount struct @@ -718,9 +729,12 @@ xfs_mountfs( xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); xfs_mount_setup_inode_geom(mp); xfs_rmapbt_compute_maxlevels(mp); + xfs_rtrmapbt_compute_maxlevels(mp); xfs_refcountbt_compute_maxlevels(mp); + xfs_rtrefcountbt_compute_maxlevels(mp); xfs_agbtree_compute_maxlevels(mp); + xfs_rtbtree_compute_maxlevels(mp); /* * Check if sb_agblocks is aligned at stripe boundary. 
If sb_agblocks diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index db9dade7d22a..fbed172d6770 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -158,13 +158,20 @@ typedef struct xfs_mount { uint m_bmap_dmnr[2]; /* min bmap btree records */ uint m_rmap_mxr[2]; /* max rmap btree records */ uint m_rmap_mnr[2]; /* min rmap btree records */ + uint m_rtrmap_mxr[2]; /* max rtrmap btree records */ + uint m_rtrmap_mnr[2]; /* min rtrmap btree records */ uint m_refc_mxr[2]; /* max refc btree records */ uint m_refc_mnr[2]; /* min refc btree records */ + uint m_rtrefc_mxr[2]; /* max rtrefc btree records */ + uint m_rtrefc_mnr[2]; /* min rtrefc btree records */ uint m_alloc_maxlevels; /* max alloc btree levels */ uint m_bm_maxlevels[2]; /* max bmap btree levels */ uint m_rmap_maxlevels; /* max rmap btree levels */ + uint m_rtrmap_maxlevels; /* max rtrmap btree level */ uint m_refc_maxlevels; /* max refcount btree level */ + uint m_rtrefc_maxlevels; /* max rtrefc btree level */ unsigned int m_agbtree_maxlevels; /* max level of all AG btrees */ + unsigned int m_rtbtree_maxlevels; /* max level of all rt btrees */ xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */ uint m_alloc_set_aside; /* space we can't use */ uint m_ag_max_usable; /* max space per AG */ @@ -350,7 +357,7 @@ typedef struct xfs_mount { #define XFS_FEAT_NOUUID (1ULL << 63) /* ignore uuid during mount */ #define __XFS_HAS_FEAT(name, NAME) \ -static inline bool xfs_has_ ## name (struct xfs_mount *mp) \ +static inline bool xfs_has_ ## name (const struct xfs_mount *mp) \ { \ return mp->m_features & XFS_FEAT_ ## NAME; \ } @@ -386,18 +393,30 @@ __XFS_HAS_FEAT(large_extent_counts, NREXT64) __XFS_HAS_FEAT(exchange_range, EXCHANGE_RANGE) __XFS_HAS_FEAT(metadir, METADIR) -static inline bool xfs_has_rtgroups(struct xfs_mount *mp) +static inline bool xfs_has_rtgroups(const struct xfs_mount *mp) { /* all metadir file systems also allow rtgroups */ return xfs_has_metadir(mp); } -static inline bool xfs_has_rtsb(struct xfs_mount *mp) +static inline bool xfs_has_rtsb(const struct xfs_mount *mp) { /* all rtgroups filesystems with an rt section have an rtsb */ return xfs_has_rtgroups(mp) && xfs_has_realtime(mp); } +static inline bool xfs_has_rtrmapbt(const struct xfs_mount *mp) +{ + return xfs_has_rtgroups(mp) && xfs_has_realtime(mp) && + xfs_has_rmapbt(mp); +} + +static inline bool xfs_has_rtreflink(const struct xfs_mount *mp) +{ + return xfs_has_metadir(mp) && xfs_has_realtime(mp) && + xfs_has_reflink(mp); +} + /* * Some features are always on for v5 file systems, allow the compiler to * eliminiate dead code when building without v4 support. diff --git a/fs/xfs/xfs_notify_failure.c b/fs/xfs/xfs_notify_failure.c index fa50e5308292..ed8d8ed42f0a 100644 --- a/fs/xfs/xfs_notify_failure.c +++ b/fs/xfs/xfs_notify_failure.c @@ -19,6 +19,9 @@ #include "xfs_rtalloc.h" #include "xfs_trans.h" #include "xfs_ag.h" +#include "xfs_notify_failure.h" +#include "xfs_rtgroup.h" +#include "xfs_rtrmap_btree.h" #include <linux/mm.h> #include <linux/dax.h> @@ -154,23 +157,115 @@ xfs_dax_notify_failure_thaw( } static int -xfs_dax_notify_ddev_failure( +xfs_dax_translate_range( + struct xfs_buftarg *btp, + u64 offset, + u64 len, + xfs_daddr_t *daddr, + uint64_t *bblen) +{ + u64 dev_start = btp->bt_dax_part_off; + u64 dev_len = bdev_nr_bytes(btp->bt_bdev); + u64 dev_end = dev_start + dev_len - 1; + + /* Notify failure on the whole device. 
*/ + if (offset == 0 && len == U64_MAX) { + offset = dev_start; + len = dev_len; + } + + /* Ignore the range out of filesystem area */ + if (offset + len - 1 < dev_start) + return -ENXIO; + if (offset > dev_end) + return -ENXIO; + + /* Calculate the real range when it touches the boundary */ + if (offset > dev_start) + offset -= dev_start; + else { + len -= dev_start - offset; + offset = 0; + } + if (offset + len - 1 > dev_end) + len = dev_end - offset + 1; + + *daddr = BTOBB(offset); + *bblen = BTOBB(len); + return 0; +} + +static int +xfs_dax_notify_logdev_failure( struct xfs_mount *mp, - xfs_daddr_t daddr, - xfs_daddr_t bblen, + u64 offset, + u64 len, int mf_flags) { + xfs_daddr_t daddr; + uint64_t bblen; + int error; + + /* + * Return ENXIO instead of shutting down the filesystem if the failed + * region is beyond the end of the log. + */ + error = xfs_dax_translate_range(mp->m_logdev_targp, + offset, len, &daddr, &bblen); + if (error) + return error; + + /* + * In the pre-remove case the failure notification is attempting to + * trigger a force unmount. The expectation is that the device is + * still present, but its removal is in progress and can not be + * cancelled, proceed with accessing the log device. + */ + if (mf_flags & MF_MEM_PRE_REMOVE) + return 0; + + xfs_err(mp, "ondisk log corrupt, shutting down fs!"); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK); + return -EFSCORRUPTED; +} + +static int +xfs_dax_notify_dev_failure( + struct xfs_mount *mp, + u64 offset, + u64 len, + int mf_flags, + enum xfs_group_type type) +{ struct xfs_failure_info notify = { .mf_flags = mf_flags }; struct xfs_trans *tp = NULL; struct xfs_btree_cur *cur = NULL; - struct xfs_buf *agf_bp = NULL; int error = 0; bool kernel_frozen = false; - xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, daddr); - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, fsbno); - xfs_fsblock_t end_fsbno = XFS_DADDR_TO_FSB(mp, - daddr + bblen - 1); - xfs_agnumber_t end_agno = XFS_FSB_TO_AGNO(mp, end_fsbno); + uint32_t start_gno, end_gno; + xfs_fsblock_t start_bno, end_bno; + xfs_daddr_t daddr; + uint64_t bblen; + struct xfs_group *xg = NULL; + + if (!xfs_has_rmapbt(mp)) { + xfs_debug(mp, "notify_failure() needs rmapbt enabled!"); + return -EOPNOTSUPP; + } + + error = xfs_dax_translate_range(type == XG_TYPE_RTG ? 
+ mp->m_rtdev_targp : mp->m_ddev_targp, + offset, len, &daddr, &bblen); + if (error) + return error; + + if (type == XG_TYPE_RTG) { + start_bno = xfs_daddr_to_rtb(mp, daddr); + end_bno = xfs_daddr_to_rtb(mp, daddr + bblen - 1); + } else { + start_bno = XFS_DADDR_TO_FSB(mp, daddr); + end_bno = XFS_DADDR_TO_FSB(mp, daddr + bblen - 1); + } if (mf_flags & MF_MEM_PRE_REMOVE) { xfs_info(mp, "Device is about to be removed!"); @@ -189,46 +284,58 @@ xfs_dax_notify_ddev_failure( if (error) goto out; - for (; agno <= end_agno; agno++) { + start_gno = xfs_fsb_to_gno(mp, start_bno, type); + end_gno = xfs_fsb_to_gno(mp, end_bno, type); + while ((xg = xfs_group_next_range(mp, xg, start_gno, end_gno, type))) { + struct xfs_buf *agf_bp = NULL; + struct xfs_rtgroup *rtg = NULL; struct xfs_rmap_irec ri_low = { }; struct xfs_rmap_irec ri_high; - struct xfs_agf *agf; - struct xfs_perag *pag; - xfs_agblock_t range_agend; - pag = xfs_perag_get(mp, agno); - error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp); - if (error) { - xfs_perag_put(pag); - break; - } + if (type == XG_TYPE_AG) { + struct xfs_perag *pag = to_perag(xg); - cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag); + error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp); + if (error) { + xfs_perag_put(pag); + break; + } + + cur = xfs_rmapbt_init_cursor(mp, tp, agf_bp, pag); + } else { + rtg = to_rtg(xg); + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); + cur = xfs_rtrmapbt_init_cursor(tp, rtg); + } /* * Set the rmap range from ri_low to ri_high, which represents * a [start, end] where we looking for the files or metadata. */ memset(&ri_high, 0xFF, sizeof(ri_high)); - ri_low.rm_startblock = XFS_FSB_TO_AGBNO(mp, fsbno); - if (agno == end_agno) - ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno); + if (xg->xg_gno == start_gno) + ri_low.rm_startblock = + xfs_fsb_to_gbno(mp, start_bno, type); + if (xg->xg_gno == end_gno) + ri_high.rm_startblock = + xfs_fsb_to_gbno(mp, end_bno, type); - agf = agf_bp->b_addr; - range_agend = min(be32_to_cpu(agf->agf_length) - 1, - ri_high.rm_startblock); notify.startblock = ri_low.rm_startblock; - notify.blockcount = range_agend + 1 - ri_low.rm_startblock; + notify.blockcount = min(xg->xg_block_count, + ri_high.rm_startblock + 1) - + ri_low.rm_startblock; error = xfs_rmap_query_range(cur, &ri_low, &ri_high, xfs_dax_failure_fn, ¬ify); xfs_btree_del_cursor(cur, error); - xfs_trans_brelse(tp, agf_bp); - xfs_perag_put(pag); - if (error) + if (agf_bp) + xfs_trans_brelse(tp, agf_bp); + if (rtg) + xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); + if (error) { + xfs_group_put(xg); break; - - fsbno = XFS_AGB_TO_FSB(mp, agno + 1, 0); + } } xfs_trans_cancel(tp); @@ -263,67 +370,20 @@ xfs_dax_notify_failure( int mf_flags) { struct xfs_mount *mp = dax_holder(dax_dev); - u64 ddev_start; - u64 ddev_end; if (!(mp->m_super->s_flags & SB_BORN)) { xfs_warn(mp, "filesystem is not ready for notify_failure()!"); return -EIO; } - if (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) { - xfs_debug(mp, - "notify_failure() not supported on realtime device!"); - return -EOPNOTSUPP; - } - - if (mp->m_logdev_targp && mp->m_logdev_targp->bt_daxdev == dax_dev && - mp->m_logdev_targp != mp->m_ddev_targp) { - /* - * In the pre-remove case the failure notification is attempting - * to trigger a force unmount. The expectation is that the - * device is still present, but its removal is in progress and - * can not be cancelled, proceed with accessing the log device. 
- */ - if (mf_flags & MF_MEM_PRE_REMOVE) - return 0; - xfs_err(mp, "ondisk log corrupt, shutting down fs!"); - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_ONDISK); - return -EFSCORRUPTED; - } - - if (!xfs_has_rmapbt(mp)) { - xfs_debug(mp, "notify_failure() needs rmapbt enabled!"); - return -EOPNOTSUPP; - } - - ddev_start = mp->m_ddev_targp->bt_dax_part_off; - ddev_end = ddev_start + bdev_nr_bytes(mp->m_ddev_targp->bt_bdev) - 1; - - /* Notify failure on the whole device. */ - if (offset == 0 && len == U64_MAX) { - offset = ddev_start; - len = bdev_nr_bytes(mp->m_ddev_targp->bt_bdev); - } - - /* Ignore the range out of filesystem area */ - if (offset + len - 1 < ddev_start) - return -ENXIO; - if (offset > ddev_end) - return -ENXIO; - - /* Calculate the real range when it touches the boundary */ - if (offset > ddev_start) - offset -= ddev_start; - else { - len -= ddev_start - offset; - offset = 0; + if (mp->m_logdev_targp != mp->m_ddev_targp && + mp->m_logdev_targp->bt_daxdev == dax_dev) { + return xfs_dax_notify_logdev_failure(mp, offset, len, mf_flags); } - if (offset + len - 1 > ddev_end) - len = ddev_end - offset + 1; - return xfs_dax_notify_ddev_failure(mp, BTOBB(offset), BTOBB(len), - mf_flags); + return xfs_dax_notify_dev_failure(mp, offset, len, mf_flags, + (mp->m_rtdev_targp && mp->m_rtdev_targp->bt_daxdev == dax_dev) ? + XG_TYPE_RTG : XG_TYPE_AG); } const struct dax_holder_operations xfs_dax_holder_operations = { diff --git a/fs/xfs/xfs_notify_failure.h b/fs/xfs/xfs_notify_failure.h new file mode 100644 index 000000000000..8d08ec29dd29 --- /dev/null +++ b/fs/xfs/xfs_notify_failure.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2024 Oracle. All Rights Reserved. + * Author: Darrick J. Wong <djwong@kernel.org> + */ +#ifndef __XFS_NOTIFY_FAILURE_H__ +#define __XFS_NOTIFY_FAILURE_H__ + +extern const struct dax_holder_operations xfs_dax_holder_operations; + +#endif /* __XFS_NOTIFY_FAILURE_H__ */ diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index dc8b1010d4d3..e1ba5af6250f 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -230,10 +230,10 @@ xfs_qm_unmount_rt( if (!rtg) return; - if (rtg->rtg_inodes[XFS_RTGI_BITMAP]) - xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_BITMAP]); - if (rtg->rtg_inodes[XFS_RTGI_SUMMARY]) - xfs_qm_dqdetach(rtg->rtg_inodes[XFS_RTGI_SUMMARY]); + if (rtg_bitmap(rtg)) + xfs_qm_dqdetach(rtg_bitmap(rtg)); + if (rtg_summary(rtg)) + xfs_qm_dqdetach(rtg_summary(rtg)); xfs_rtgroup_rele(rtg); } @@ -428,6 +428,8 @@ void xfs_qm_dqdetach( xfs_inode_t *ip) { + if (xfs_is_metadir_inode(ip)) + return; if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot)) return; diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index 847ba29630e9..37f1230e7584 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -32,21 +32,27 @@ xfs_fill_statvfs_from_dquot( limit = blkres->softlimit ? blkres->softlimit : blkres->hardlimit; - if (limit && statp->f_blocks > limit) { - statp->f_blocks = limit; - statp->f_bfree = statp->f_bavail = - (statp->f_blocks > blkres->reserved) ? - (statp->f_blocks - blkres->reserved) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > blkres->reserved) + remaining = limit - blkres->reserved; + + statp->f_blocks = min(statp->f_blocks, limit); + statp->f_bfree = min(statp->f_bfree, remaining); } limit = dqp->q_ino.softlimit ? dqp->q_ino.softlimit : dqp->q_ino.hardlimit; - if (limit && statp->f_files > limit) { - statp->f_files = limit; - statp->f_ffree = - (statp->f_files > dqp->q_ino.reserved) ? 
- (statp->f_files - dqp->q_ino.reserved) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dqp->q_ino.reserved) + remaining = limit - dqp->q_ino.reserved; + + statp->f_files = min(statp->f_files, limit); + statp->f_ffree = min(statp->f_ffree, remaining); } } diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index d7565462af3d..105e6eb57620 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -29,11 +29,6 @@ struct xfs_buf; (XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \ (XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL)) -#define XFS_IS_DQDETACHED(ip) \ - ((ip)->i_udquot == NULL && \ - (ip)->i_gdquot == NULL && \ - (ip)->i_pdquot == NULL) - #define XFS_QM_NEED_QUOTACHECK(mp) \ ((XFS_IS_UQUOTA_ON(mp) && \ (mp->m_sb.sb_qflags & XFS_UQUOTA_CHKD) == 0) || \ diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index bede1c96c330..fe2d7aab8554 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -23,6 +23,7 @@ #include "xfs_ag.h" #include "xfs_btree.h" #include "xfs_trace.h" +#include "xfs_rtgroup.h" struct kmem_cache *xfs_cui_cache; struct kmem_cache *xfs_cud_cache; @@ -94,8 +95,9 @@ xfs_cui_item_format( ASSERT(atomic_read(&cuip->cui_next_extent) == cuip->cui_format.cui_nextents); + ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT); - cuip->cui_format.cui_type = XFS_LI_CUI; + cuip->cui_format.cui_type = lip->li_type; cuip->cui_format.cui_size = 1; xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format, @@ -138,12 +140,14 @@ xfs_cui_item_release( STATIC struct xfs_cui_log_item * xfs_cui_init( struct xfs_mount *mp, + unsigned short item_type, uint nextents) - { struct xfs_cui_log_item *cuip; ASSERT(nextents > 0); + ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT); + if (nextents > XFS_CUI_MAX_FAST_EXTENTS) cuip = kzalloc(xfs_cui_log_item_sizeof(nextents), GFP_KERNEL | __GFP_NOFAIL); @@ -151,7 +155,7 @@ xfs_cui_init( cuip = kmem_cache_zalloc(xfs_cui_cache, GFP_KERNEL | __GFP_NOFAIL); - xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops); + xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops); cuip->cui_format.cui_nextents = nextents; cuip->cui_format.cui_id = (uintptr_t)(void *)cuip; atomic_set(&cuip->cui_next_extent, 0); @@ -190,7 +194,9 @@ xfs_cud_item_format( struct xfs_cud_log_item *cudp = CUD_ITEM(lip); struct xfs_log_iovec *vecp = NULL; - cudp->cud_format.cud_type = XFS_LI_CUD; + ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT); + + cudp->cud_format.cud_type = lip->li_type; cudp->cud_format.cud_size = 1; xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format, @@ -234,6 +240,14 @@ static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e) return list_entry(e, struct xfs_refcount_intent, ri_list); } +static inline bool +xfs_cui_item_isrt(const struct xfs_log_item *lip) +{ + ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT); + + return lip->li_type == XFS_LI_CUI_RT; +} + /* Sort refcount intents by AG. 
*/ static int xfs_refcount_update_diff_items( @@ -282,18 +296,20 @@ xfs_refcount_update_log_item( } static struct xfs_log_item * -xfs_refcount_update_create_intent( +__xfs_refcount_update_create_intent( struct xfs_trans *tp, struct list_head *items, unsigned int count, - bool sort) + bool sort, + unsigned short item_type) { struct xfs_mount *mp = tp->t_mountp; - struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count); + struct xfs_cui_log_item *cuip; struct xfs_refcount_intent *ri; ASSERT(count > 0); + cuip = xfs_cui_init(mp, item_type, count); if (sort) list_sort(mp, items, xfs_refcount_update_diff_items); list_for_each_entry(ri, items, ri_list) @@ -301,6 +317,23 @@ xfs_refcount_update_create_intent( return &cuip->cui_item; } +static struct xfs_log_item * +xfs_refcount_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + return __xfs_refcount_update_create_intent(tp, items, count, sort, + XFS_LI_CUI); +} + +static inline unsigned short +xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip) +{ + return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD; +} + /* Get an CUD so we can process all the deferred refcount updates. */ static struct xfs_log_item * xfs_refcount_update_create_done( @@ -312,8 +345,8 @@ xfs_refcount_update_create_done( struct xfs_cud_log_item *cudp; cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL); - xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD, - &xfs_cud_item_ops); + xfs_log_item_init(tp->t_mountp, &cudp->cud_item, + xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops); cudp->cud_cuip = cuip; cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id; @@ -328,10 +361,20 @@ xfs_refcount_defer_add( { struct xfs_mount *mp = tp->t_mountp; - trace_xfs_refcount_defer(mp, ri); + /* + * Deferred refcount updates for the realtime and data sections must + * use separate transactions to finish deferred work because updates to + * realtime metadata files can lock AGFs to allocate btree blocks and + * we don't want that mixing with the AGF locks taken to finish data + * section updates. + */ + ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock, + ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG); - ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock, XG_TYPE_AG); - xfs_defer_add(tp, &ri->ri_list, &xfs_refcount_update_defer_type); + trace_xfs_refcount_defer(mp, ri); + xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ? + &xfs_rtrefcount_update_defer_type : + &xfs_refcount_update_defer_type); } /* Cancel a deferred refcount update. 
*/ @@ -381,7 +424,7 @@ xfs_refcount_finish_one_cleanup( return; agbp = rcur->bc_ag.agbp; xfs_btree_del_cursor(rcur, error); - if (error) + if (error && agbp) xfs_trans_brelse(tp, agbp); } @@ -397,6 +440,7 @@ xfs_refcount_update_abort_intent( static inline bool xfs_cui_validate_phys( struct xfs_mount *mp, + bool isrt, struct xfs_phys_extent *pmap) { if (!xfs_has_reflink(mp)) @@ -415,6 +459,9 @@ xfs_cui_validate_phys( return false; } + if (isrt) + return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len); + return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len); } @@ -422,6 +469,7 @@ static inline void xfs_cui_recover_work( struct xfs_mount *mp, struct xfs_defer_pending *dfp, + bool isrt, struct xfs_phys_extent *pmap) { struct xfs_refcount_intent *ri; @@ -432,7 +480,8 @@ xfs_cui_recover_work( ri->ri_startblock = pmap->pe_startblock; ri->ri_blockcount = pmap->pe_len; ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock, - XG_TYPE_AG); + isrt ? XG_TYPE_RTG : XG_TYPE_AG); + ri->ri_realtime = isrt; xfs_defer_add_item(dfp, &ri->ri_list); } @@ -451,6 +500,7 @@ xfs_refcount_recover_work( struct xfs_cui_log_item *cuip = CUI_ITEM(lip); struct xfs_trans *tp; struct xfs_mount *mp = lip->li_log->l_mp; + bool isrt = xfs_cui_item_isrt(lip); int i; int error = 0; @@ -460,7 +510,7 @@ xfs_refcount_recover_work( * just toss the CUI. */ for (i = 0; i < cuip->cui_format.cui_nextents; i++) { - if (!xfs_cui_validate_phys(mp, + if (!xfs_cui_validate_phys(mp, isrt, &cuip->cui_format.cui_extents[i])) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, &cuip->cui_format, @@ -468,7 +518,8 @@ xfs_refcount_recover_work( return -EFSCORRUPTED; } - xfs_cui_recover_work(mp, dfp, &cuip->cui_format.cui_extents[i]); + xfs_cui_recover_work(mp, dfp, isrt, + &cuip->cui_format.cui_extents[i]); } /* @@ -515,10 +566,13 @@ xfs_refcount_relog_intent( struct xfs_phys_extent *pmap; unsigned int count; + ASSERT(intent->li_type == XFS_LI_CUI || + intent->li_type == XFS_LI_CUI_RT); + count = CUI_ITEM(intent)->cui_format.cui_nextents; pmap = CUI_ITEM(intent)->cui_format.cui_extents; - cuip = xfs_cui_init(tp->t_mountp, count); + cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count); memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap)); atomic_set(&cuip->cui_next_extent, count); @@ -538,6 +592,71 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = { .relog_intent = xfs_refcount_relog_intent, }; +#ifdef CONFIG_XFS_RT +static struct xfs_log_item * +xfs_rtrefcount_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + return __xfs_refcount_update_create_intent(tp, items, count, sort, + XFS_LI_CUI_RT); +} + +/* Process a deferred realtime refcount update. */ +STATIC int +xfs_rtrefcount_update_finish_item( + struct xfs_trans *tp, + struct xfs_log_item *done, + struct list_head *item, + struct xfs_btree_cur **state) +{ + struct xfs_refcount_intent *ri = ci_entry(item); + int error; + + error = xfs_rtrefcount_finish_one(tp, ri, state); + + /* Did we run out of reservation? Requeue what we didn't finish. */ + if (!error && ri->ri_blockcount > 0) { + ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE || + ri->ri_type == XFS_REFCOUNT_DECREASE); + return -EAGAIN; + } + + xfs_refcount_update_cancel_item(item); + return error; +} + +/* Clean up after calling xfs_rtrefcount_finish_one. 
*/ +STATIC void +xfs_rtrefcount_finish_one_cleanup( + struct xfs_trans *tp, + struct xfs_btree_cur *rcur, + int error) +{ + if (rcur) + xfs_btree_del_cursor(rcur, error); +} + +const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = { + .name = "rtrefcount", + .max_items = XFS_CUI_MAX_FAST_EXTENTS, + .create_intent = xfs_rtrefcount_update_create_intent, + .abort_intent = xfs_refcount_update_abort_intent, + .create_done = xfs_refcount_update_create_done, + .finish_item = xfs_rtrefcount_update_finish_item, + .finish_cleanup = xfs_rtrefcount_finish_one_cleanup, + .cancel_item = xfs_refcount_update_cancel_item, + .recover_work = xfs_refcount_recover_work, + .relog_intent = xfs_refcount_relog_intent, +}; +#else +const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = { + .name = "rtrefcount", +}; +#endif /* CONFIG_XFS_RT */ + STATIC bool xfs_cui_item_match( struct xfs_log_item *lip, @@ -603,7 +722,7 @@ xlog_recover_cui_commit_pass2( return -EFSCORRUPTED; } - cuip = xfs_cui_init(mp, cui_formatp->cui_nextents); + cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents); xfs_cui_copy_format(&cuip->cui_format, cui_formatp); atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents); @@ -617,6 +736,61 @@ const struct xlog_recover_item_ops xlog_cui_item_ops = { .commit_pass2 = xlog_recover_cui_commit_pass2, }; +#ifdef CONFIG_XFS_RT +STATIC int +xlog_recover_rtcui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_cui_log_item *cuip; + struct xfs_cui_log_format *cui_formatp; + size_t len; + + cui_formatp = item->ri_buf[0].i_addr; + + if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents); + if (item->ri_buf[0].i_len != len) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents); + xfs_cui_copy_format(&cuip->cui_format, cui_formatp); + atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents); + + xlog_recover_intent_item(log, &cuip->cui_item, lsn, + &xfs_rtrefcount_update_defer_type); + return 0; +} +#else +STATIC int +xlog_recover_rtcui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; +} +#endif + +const struct xlog_recover_item_ops xlog_rtcui_item_ops = { + .item_type = XFS_LI_CUI_RT, + .commit_pass2 = xlog_recover_rtcui_commit_pass2, +}; + /* * This routine is called when an CUD format structure is found in a committed * transaction in the log. 
Its purpose is to cancel the corresponding CUI if it @@ -648,3 +822,33 @@ const struct xlog_recover_item_ops xlog_cud_item_ops = { .item_type = XFS_LI_CUD, .commit_pass2 = xlog_recover_cud_commit_pass2, }; + +#ifdef CONFIG_XFS_RT +STATIC int +xlog_recover_rtcud_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_cud_log_format *cud_formatp; + + cud_formatp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + xlog_recover_release_intent(log, XFS_LI_CUI_RT, + cud_formatp->cud_cui_id); + return 0; +} +#else +# define xlog_recover_rtcud_commit_pass2 xlog_recover_rtcui_commit_pass2 +#endif + +const struct xlog_recover_item_ops xlog_rtcud_item_ops = { + .item_type = XFS_LI_CUD_RT, + .commit_pass2 = xlog_recover_rtcud_commit_pass2, +}; diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index b11769c009ef..59f7fc16eb80 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -30,6 +30,10 @@ #include "xfs_ag.h" #include "xfs_ag_resv.h" #include "xfs_health.h" +#include "xfs_rtrefcount_btree.h" +#include "xfs_rtalloc.h" +#include "xfs_rtgroup.h" +#include "xfs_metafile.h" /* * Copy on Write of Shared Blocks @@ -120,38 +124,93 @@ */ /* - * Given an AG extent, find the lowest-numbered run of shared blocks - * within that range and return the range in fbno/flen. If - * find_end_of_shared is true, return the longest contiguous extent of - * shared blocks. If there are no shared extents, fbno and flen will - * be set to NULLAGBLOCK and 0, respectively. + * Given a file mapping for the data device, find the lowest-numbered run of + * shared blocks within that mapping and return it in shared_offset/shared_len. + * The offset is relative to the start of irec. + * + * If find_end_of_shared is true, return the longest contiguous extent of shared + * blocks. If there are no shared extents, shared_offset and shared_len will be + * set to 0; */ static int xfs_reflink_find_shared( - struct xfs_perag *pag, + struct xfs_mount *mp, struct xfs_trans *tp, - xfs_agblock_t agbno, - xfs_extlen_t aglen, - xfs_agblock_t *fbno, - xfs_extlen_t *flen, + const struct xfs_bmbt_irec *irec, + xfs_extlen_t *shared_offset, + xfs_extlen_t *shared_len, bool find_end_of_shared) { struct xfs_buf *agbp; + struct xfs_perag *pag; struct xfs_btree_cur *cur; int error; + xfs_agblock_t orig_bno, found_bno; + + pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock)); + orig_bno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); error = xfs_alloc_read_agf(pag, tp, 0, &agbp); if (error) - return error; + goto out; - cur = xfs_refcountbt_init_cursor(pag_mount(pag), tp, agbp, pag); + cur = xfs_refcountbt_init_cursor(mp, tp, agbp, pag); + error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount, + &found_bno, shared_len, find_end_of_shared); + xfs_btree_del_cursor(cur, error); + xfs_trans_brelse(tp, agbp); - error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen, - find_end_of_shared); + if (!error && *shared_len) + *shared_offset = found_bno - orig_bno; +out: + xfs_perag_put(pag); + return error; +} +/* + * Given a file mapping for the rt device, find the lowest-numbered run of + * shared blocks within that mapping and return it in shared_offset/shared_len. + * The offset is relative to the start of irec. 
+ * + * If find_end_of_shared is true, return the longest contiguous extent of shared + * blocks. If there are no shared extents, shared_offset and shared_len will be + * set to 0; + */ +static int +xfs_reflink_find_rtshared( + struct xfs_mount *mp, + struct xfs_trans *tp, + const struct xfs_bmbt_irec *irec, + xfs_extlen_t *shared_offset, + xfs_extlen_t *shared_len, + bool find_end_of_shared) +{ + struct xfs_rtgroup *rtg; + struct xfs_btree_cur *cur; + xfs_rgblock_t orig_bno; + xfs_agblock_t found_bno; + int error; + + BUILD_BUG_ON(NULLRGBLOCK != NULLAGBLOCK); + + /* + * Note: this uses the not quite correct xfs_agblock_t type because + * xfs_refcount_find_shared is shared between the RT and data device + * refcount code. + */ + orig_bno = xfs_rtb_to_rgbno(mp, irec->br_startblock); + rtg = xfs_rtgroup_get(mp, xfs_rtb_to_rgno(mp, irec->br_startblock)); + + xfs_rtgroup_lock(rtg, XFS_RTGLOCK_REFCOUNT); + cur = xfs_rtrefcountbt_init_cursor(tp, rtg); + error = xfs_refcount_find_shared(cur, orig_bno, irec->br_blockcount, + &found_bno, shared_len, find_end_of_shared); xfs_btree_del_cursor(cur, error); + xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_REFCOUNT); + xfs_rtgroup_put(rtg); - xfs_trans_brelse(tp, agbp); + if (!error && *shared_len) + *shared_offset = found_bno - orig_bno; return error; } @@ -172,11 +231,7 @@ xfs_reflink_trim_around_shared( bool *shared) { struct xfs_mount *mp = ip->i_mount; - struct xfs_perag *pag; - xfs_agblock_t agbno; - xfs_extlen_t aglen; - xfs_agblock_t fbno; - xfs_extlen_t flen; + xfs_extlen_t shared_offset, shared_len; int error = 0; /* Holes, unwritten, and delalloc extents cannot be shared */ @@ -187,41 +242,37 @@ xfs_reflink_trim_around_shared( trace_xfs_reflink_trim_around_shared(ip, irec); - pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock)); - agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock); - aglen = irec->br_blockcount; - - error = xfs_reflink_find_shared(pag, NULL, agbno, aglen, &fbno, &flen, - true); - xfs_perag_put(pag); + if (XFS_IS_REALTIME_INODE(ip)) + error = xfs_reflink_find_rtshared(mp, NULL, irec, + &shared_offset, &shared_len, true); + else + error = xfs_reflink_find_shared(mp, NULL, irec, + &shared_offset, &shared_len, true); if (error) return error; - *shared = false; - if (fbno == NULLAGBLOCK) { + if (!shared_len) { /* No shared blocks at all. */ - return 0; - } - - if (fbno == agbno) { + *shared = false; + } else if (!shared_offset) { /* - * The start of this extent is shared. Truncate the - * mapping at the end of the shared region so that a - * subsequent iteration starts at the start of the - * unshared region. + * The start of this mapping points to shared space. Truncate + * the mapping at the end of the shared region so that a + * subsequent iteration starts at the start of the unshared + * region. */ - irec->br_blockcount = flen; + irec->br_blockcount = shared_len; *shared = true; - return 0; + } else { + /* + * There's a shared region that doesn't start at the beginning + * of the mapping. Truncate the mapping at the start of the + * shared extent so that a subsequent iteration starts at the + * start of the shared region. + */ + irec->br_blockcount = shared_offset; + *shared = false; } - - /* - * There's a shared extent midway through this extent. - * Truncate the mapping at the start of the shared - * extent so that a subsequent iteration starts at the - * start of the shared region. 
- */ - irec->br_blockcount = fbno - agbno; return 0; } @@ -389,20 +440,26 @@ xfs_reflink_fill_cow_hole( struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; xfs_filblks_t resaligned; - xfs_extlen_t resblks; + unsigned int dblocks = 0, rblocks = 0; int nimaps; int error; bool found; resaligned = xfs_aligned_fsb_count(imap->br_startoff, imap->br_blockcount, xfs_get_cowextsz_hint(ip)); - resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); + if (XFS_IS_REALTIME_INODE(ip)) { + dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0); + rblocks = resaligned; + } else { + dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); + rblocks = 0; + } xfs_iunlock(ip, *lockmode); *lockmode = 0; - error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0, - false, &tp); + error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, + rblocks, false, &tp); if (error) return error; @@ -571,6 +628,7 @@ xfs_reflink_cancel_cow_blocks( struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); struct xfs_bmbt_irec got, del; struct xfs_iext_cursor icur; + bool isrt = XFS_IS_REALTIME_INODE(ip); int error = 0; if (!xfs_inode_has_cow_data(ip)) @@ -598,12 +656,13 @@ xfs_reflink_cancel_cow_blocks( ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER); /* Free the CoW orphan record. */ - xfs_refcount_free_cow_extent(*tpp, del.br_startblock, - del.br_blockcount); + xfs_refcount_free_cow_extent(*tpp, isrt, + del.br_startblock, del.br_blockcount); error = xfs_free_extent_later(*tpp, del.br_startblock, del.br_blockcount, NULL, - XFS_AG_RESV_NONE, 0); + XFS_AG_RESV_NONE, + isrt ? XFS_FREE_EXTENT_REALTIME : 0); if (error) break; @@ -687,6 +746,35 @@ out: return error; } +#ifdef CONFIG_XFS_QUOTA +/* + * Update quota accounting for a remapping operation. When we're remapping + * something from the CoW fork to the data fork, we must update the quota + * accounting for delayed allocations. For remapping from the data fork to the + * data fork, use regular block accounting. + */ +static inline void +xfs_reflink_update_quota( + struct xfs_trans *tp, + struct xfs_inode *ip, + bool is_cow, + int64_t blocks) +{ + unsigned int qflag; + + if (XFS_IS_REALTIME_INODE(ip)) { + qflag = is_cow ? XFS_TRANS_DQ_DELRTBCOUNT : + XFS_TRANS_DQ_RTBCOUNT; + } else { + qflag = is_cow ? XFS_TRANS_DQ_DELBCOUNT : + XFS_TRANS_DQ_BCOUNT; + } + xfs_trans_mod_dquot_byino(tp, ip, qflag, blocks); +} +#else +# define xfs_reflink_update_quota(tp, ip, is_cow, blocks) ((void)0) +#endif + /* * Remap part of the CoW fork into the data fork. * @@ -710,6 +798,7 @@ xfs_reflink_end_cow_extent( struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_COW_FORK); unsigned int resblks; int nmaps; + bool isrt = XFS_IS_REALTIME_INODE(ip); int error; resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); @@ -779,9 +868,8 @@ xfs_reflink_end_cow_extent( * or not), unmap the extent and drop its refcount. */ xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data); - xfs_refcount_decrease_extent(tp, &data); - xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, - -data.br_blockcount); + xfs_refcount_decrease_extent(tp, isrt, &data); + xfs_reflink_update_quota(tp, ip, false, -data.br_blockcount); } else if (data.br_startblock == DELAYSTARTBLOCK) { int done; @@ -799,14 +887,14 @@ xfs_reflink_end_cow_extent( } /* Free the CoW orphan record. */ - xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount); + xfs_refcount_free_cow_extent(tp, isrt, del.br_startblock, + del.br_blockcount); /* Map the new blocks into the data fork. 
*/ xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &del); /* Charge this new data fork mapping to the on-disk quota. */ - xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT, - (long)del.br_blockcount); + xfs_reflink_update_quota(tp, ip, true, del.br_blockcount); /* Remove the mapping from the CoW fork. */ xfs_bmap_del_extent_cow(ip, &icur, &got, &del); @@ -895,20 +983,29 @@ xfs_reflink_recover_cow( struct xfs_mount *mp) { struct xfs_perag *pag = NULL; + struct xfs_rtgroup *rtg = NULL; int error = 0; if (!xfs_has_reflink(mp)) return 0; while ((pag = xfs_perag_next(mp, pag))) { - error = xfs_refcount_recover_cow_leftovers(mp, pag); + error = xfs_refcount_recover_cow_leftovers(pag_group(pag)); if (error) { xfs_perag_rele(pag); - break; + return error; } } - return error; + while ((rtg = xfs_rtgroup_next(mp, rtg))) { + error = xfs_refcount_recover_cow_leftovers(rtg_group(rtg)); + if (error) { + xfs_rtgroup_rele(rtg); + return error; + } + } + + return 0; } /* @@ -1100,14 +1197,28 @@ out_error: static int xfs_reflink_ag_has_free_space( struct xfs_mount *mp, - xfs_agnumber_t agno) + struct xfs_inode *ip, + xfs_fsblock_t fsb) { struct xfs_perag *pag; + xfs_agnumber_t agno; int error = 0; if (!xfs_has_rmapbt(mp)) return 0; + if (XFS_IS_REALTIME_INODE(ip)) { + struct xfs_rtgroup *rtg; + xfs_rgnumber_t rgno; + + rgno = xfs_rtb_to_rgno(mp, fsb); + rtg = xfs_rtgroup_get(mp, rgno); + if (xfs_metafile_resv_critical(rtg_rmap(rtg))) + error = -ENOSPC; + xfs_rtgroup_put(rtg); + return error; + } + agno = XFS_FSB_TO_AGNO(mp, fsb); pag = xfs_perag_get(mp, agno); if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) || xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA)) @@ -1131,10 +1242,11 @@ xfs_reflink_remap_extent( struct xfs_trans *tp; xfs_off_t newlen; int64_t qdelta = 0; - unsigned int resblks; + unsigned int dblocks, rblocks, resblks; bool quota_reserved = true; bool smap_real; bool dmap_written = xfs_bmap_is_written_extent(dmap); + bool isrt = XFS_IS_REALTIME_INODE(ip); int iext_delta = 0; int nimaps; int error; @@ -1161,8 +1273,15 @@ xfs_reflink_remap_extent( * we're remapping. */ resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (XFS_IS_REALTIME_INODE(ip)) { + dblocks = resblks; + rblocks = dmap->br_blockcount; + } else { + dblocks = resblks + dmap->br_blockcount; + rblocks = 0; + } error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, - resblks + dmap->br_blockcount, 0, false, &tp); + dblocks, rblocks, false, &tp); if (error == -EDQUOT || error == -ENOSPC) { quota_reserved = false; error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, @@ -1213,8 +1332,8 @@ xfs_reflink_remap_extent( /* No reflinking if the AG of the dest mapping is low on space. */ if (dmap_written) { - error = xfs_reflink_ag_has_free_space(mp, - XFS_FSB_TO_AGNO(mp, dmap->br_startblock)); + error = xfs_reflink_ag_has_free_space(mp, ip, + dmap->br_startblock); if (error) goto out_cancel; } @@ -1242,8 +1361,15 @@ xfs_reflink_remap_extent( * done. */ if (!quota_reserved && !smap_real && dmap_written) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - dmap->br_blockcount, 0, false); + if (XFS_IS_REALTIME_INODE(ip)) { + dblocks = 0; + rblocks = dmap->br_blockcount; + } else { + dblocks = dmap->br_blockcount; + rblocks = 0; + } + error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, + false); if (error) goto out_cancel; } @@ -1264,7 +1390,7 @@ xfs_reflink_remap_extent( * or not), unmap the extent and drop its refcount. 
*/ xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &smap); - xfs_refcount_decrease_extent(tp, &smap); + xfs_refcount_decrease_extent(tp, isrt, &smap); qdelta -= smap.br_blockcount; } else if (smap.br_startblock == DELAYSTARTBLOCK) { int done; @@ -1287,12 +1413,12 @@ xfs_reflink_remap_extent( * its refcount and map it into the file. */ if (dmap_written) { - xfs_refcount_increase_extent(tp, dmap); + xfs_refcount_increase_extent(tp, isrt, dmap); xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, dmap); qdelta += dmap->br_blockcount; } - xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta); + xfs_reflink_update_quota(tp, ip, false, qdelta); /* Update dest isize if needed. */ newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount); @@ -1466,8 +1592,8 @@ xfs_reflink_remap_prep( /* Check file eligibility and prepare for block sharing. */ ret = -EINVAL; - /* Don't reflink realtime inodes */ - if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest)) + /* Can't reflink between data and rt volumes */ + if (XFS_IS_REALTIME_INODE(src) != XFS_IS_REALTIME_INODE(dest)) goto out_unlock; /* Don't share DAX file data with non-DAX file. */ @@ -1547,27 +1673,23 @@ xfs_reflink_inode_has_shared_extents( *has_shared = false; found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got); while (found) { - struct xfs_perag *pag; - xfs_agblock_t agbno; - xfs_extlen_t aglen; - xfs_agblock_t rbno; - xfs_extlen_t rlen; + xfs_extlen_t shared_offset, shared_len; if (isnullstartblock(got.br_startblock) || got.br_state != XFS_EXT_NORM) goto next; - pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, got.br_startblock)); - agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock); - aglen = got.br_blockcount; - error = xfs_reflink_find_shared(pag, tp, agbno, aglen, - &rbno, &rlen, false); - xfs_perag_put(pag); + if (XFS_IS_REALTIME_INODE(ip)) + error = xfs_reflink_find_rtshared(mp, tp, &got, + &shared_offset, &shared_len, false); + else + error = xfs_reflink_find_shared(mp, tp, &got, + &shared_offset, &shared_len, false); if (error) return error; /* Is there still a shared block here? */ - if (rbno != NULLAGBLOCK) { + if (shared_len) { *has_shared = true; return 0; } @@ -1700,3 +1822,28 @@ out: trace_xfs_reflink_unshare_error(ip, error, _RET_IP_); return error; } + +/* + * Can we use reflink with this realtime extent size? Note that we don't check + * for rblocks > 0 here because this can be called as part of attaching a new + * rt section. + */ +bool +xfs_reflink_supports_rextsize( + struct xfs_mount *mp, + unsigned int rextsize) +{ + /* reflink on the realtime device requires rtgroups */ + if (!xfs_has_rtgroups(mp)) + return false; + + /* + * Reflink doesn't support rt extent size larger than a single fsblock + * because we would have to perform CoW-around for unaligned write + * requests to guarantee that we always remap entire rt extents. 
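Editorial illustration of the CoW-around problem described in the comment above; the block sizes are invented for the example and do not come from the patch.
/*
 * Example: with rextsize = 4 fsblocks, a 1-block overwrite of a shared
 * mapping at block 6 could not remap just that one block; the whole rt
 * extent containing it (say, blocks 4-7) would have to be copied and
 * remapped so that every mapping stays aligned to rt extent boundaries.
 * Requiring rextsize = 1 avoids that CoW-around entirely.
 */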
+ */ + if (rextsize != 1) + return false; + + return true; +} diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index 4a58e4533671..cc4e92278279 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h @@ -25,7 +25,7 @@ xfs_can_free_cowblocks(struct xfs_inode *ip) return true; } -extern int xfs_reflink_trim_around_shared(struct xfs_inode *ip, +int xfs_reflink_trim_around_shared(struct xfs_inode *ip, struct xfs_bmbt_irec *irec, bool *shared); int xfs_bmap_trim_cow(struct xfs_inode *ip, struct xfs_bmbt_irec *imap, bool *shared); @@ -62,4 +62,6 @@ extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in, extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen, xfs_extlen_t cowextsize, unsigned int remap_flags); +bool xfs_reflink_supports_rextsize(struct xfs_mount *mp, unsigned int rextsize); + #endif /* __XFS_REFLINK_H */ diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 76b3c0ed3b4f..89decffe76c8 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -23,6 +23,7 @@ #include "xfs_ag.h" #include "xfs_btree.h" #include "xfs_trace.h" +#include "xfs_rtgroup.h" struct kmem_cache *xfs_rui_cache; struct kmem_cache *xfs_rud_cache; @@ -94,7 +95,9 @@ xfs_rui_item_format( ASSERT(atomic_read(&ruip->rui_next_extent) == ruip->rui_format.rui_nextents); - ruip->rui_format.rui_type = XFS_LI_RUI; + ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT); + + ruip->rui_format.rui_type = lip->li_type; ruip->rui_format.rui_size = 1; xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format, @@ -137,12 +140,15 @@ xfs_rui_item_release( STATIC struct xfs_rui_log_item * xfs_rui_init( struct xfs_mount *mp, + unsigned short item_type, uint nextents) { struct xfs_rui_log_item *ruip; ASSERT(nextents > 0); + ASSERT(item_type == XFS_LI_RUI || item_type == XFS_LI_RUI_RT); + if (nextents > XFS_RUI_MAX_FAST_EXTENTS) ruip = kzalloc(xfs_rui_log_item_sizeof(nextents), GFP_KERNEL | __GFP_NOFAIL); @@ -150,7 +156,7 @@ xfs_rui_init( ruip = kmem_cache_zalloc(xfs_rui_cache, GFP_KERNEL | __GFP_NOFAIL); - xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops); + xfs_log_item_init(mp, &ruip->rui_item, item_type, &xfs_rui_item_ops); ruip->rui_format.rui_nextents = nextents; ruip->rui_format.rui_id = (uintptr_t)(void *)ruip; atomic_set(&ruip->rui_next_extent, 0); @@ -189,7 +195,9 @@ xfs_rud_item_format( struct xfs_rud_log_item *rudp = RUD_ITEM(lip); struct xfs_log_iovec *vecp = NULL; - rudp->rud_format.rud_type = XFS_LI_RUD; + ASSERT(lip->li_type == XFS_LI_RUD || lip->li_type == XFS_LI_RUD_RT); + + rudp->rud_format.rud_type = lip->li_type; rudp->rud_format.rud_size = 1; xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format, @@ -233,6 +241,14 @@ static inline struct xfs_rmap_intent *ri_entry(const struct list_head *e) return list_entry(e, struct xfs_rmap_intent, ri_list); } +static inline bool +xfs_rui_item_isrt(const struct xfs_log_item *lip) +{ + ASSERT(lip->li_type == XFS_LI_RUI || lip->li_type == XFS_LI_RUI_RT); + + return lip->li_type == XFS_LI_RUI_RT; +} + /* Sort rmap intents by AG. 
*/ static int xfs_rmap_update_diff_items( @@ -305,18 +321,20 @@ xfs_rmap_update_log_item( } static struct xfs_log_item * -xfs_rmap_update_create_intent( +__xfs_rmap_update_create_intent( struct xfs_trans *tp, struct list_head *items, unsigned int count, - bool sort) + bool sort, + unsigned short item_type) { struct xfs_mount *mp = tp->t_mountp; - struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count); + struct xfs_rui_log_item *ruip; struct xfs_rmap_intent *ri; ASSERT(count > 0); + ruip = xfs_rui_init(mp, item_type, count); if (sort) list_sort(mp, items, xfs_rmap_update_diff_items); list_for_each_entry(ri, items, ri_list) @@ -324,6 +342,23 @@ xfs_rmap_update_create_intent( return &ruip->rui_item; } +static struct xfs_log_item * +xfs_rmap_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + return __xfs_rmap_update_create_intent(tp, items, count, sort, + XFS_LI_RUI); +} + +static inline unsigned short +xfs_rud_type_from_rui(const struct xfs_rui_log_item *ruip) +{ + return xfs_rui_item_isrt(&ruip->rui_item) ? XFS_LI_RUD_RT : XFS_LI_RUD; +} + /* Get an RUD so we can process all the deferred rmap updates. */ static struct xfs_log_item * xfs_rmap_update_create_done( @@ -335,8 +370,8 @@ xfs_rmap_update_create_done( struct xfs_rud_log_item *rudp; rudp = kmem_cache_zalloc(xfs_rud_cache, GFP_KERNEL | __GFP_NOFAIL); - xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD, - &xfs_rud_item_ops); + xfs_log_item_init(tp->t_mountp, &rudp->rud_item, + xfs_rud_type_from_rui(ruip), &xfs_rud_item_ops); rudp->rud_ruip = ruip; rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id; @@ -351,11 +386,20 @@ xfs_rmap_defer_add( { struct xfs_mount *mp = tp->t_mountp; - trace_xfs_rmap_defer(mp, ri); - + /* + * Deferred rmap updates for the realtime and data sections must use + * separate transactions to finish deferred work because updates to + * realtime metadata files can lock AGFs to allocate btree blocks and + * we don't want that mixing with the AGF locks taken to finish data + * section updates. + */ ri->ri_group = xfs_group_intent_get(mp, ri->ri_bmap.br_startblock, - XG_TYPE_AG); - xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type); + ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG); + + trace_xfs_rmap_defer(mp, ri); + xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ? + &xfs_rtrmap_update_defer_type : + &xfs_rmap_update_defer_type); } /* Cancel a deferred rmap update. */ @@ -415,6 +459,7 @@ xfs_rmap_update_abort_intent( static inline bool xfs_rui_validate_map( struct xfs_mount *mp, + bool isrt, struct xfs_map_extent *map) { if (!xfs_has_rmapbt(mp)) @@ -444,6 +489,9 @@ xfs_rui_validate_map( if (!xfs_verify_fileext(mp, map->me_startoff, map->me_len)) return false; + if (isrt) + return xfs_verify_rtbext(mp, map->me_startblock, map->me_len); + return xfs_verify_fsbext(mp, map->me_startblock, map->me_len); } @@ -451,6 +499,7 @@ static inline void xfs_rui_recover_work( struct xfs_mount *mp, struct xfs_defer_pending *dfp, + bool isrt, const struct xfs_map_extent *map) { struct xfs_rmap_intent *ri; @@ -495,7 +544,9 @@ xfs_rui_recover_work( ri->ri_bmap.br_blockcount = map->me_len; ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ? XFS_EXT_UNWRITTEN : XFS_EXT_NORM; - ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, XG_TYPE_AG); + ri->ri_group = xfs_group_intent_get(mp, map->me_startblock, + isrt ? 
XG_TYPE_RTG : XG_TYPE_AG); + ri->ri_realtime = isrt; xfs_defer_add_item(dfp, &ri->ri_list); } @@ -514,6 +565,7 @@ xfs_rmap_recover_work( struct xfs_rui_log_item *ruip = RUI_ITEM(lip); struct xfs_trans *tp; struct xfs_mount *mp = lip->li_log->l_mp; + bool isrt = xfs_rui_item_isrt(lip); int i; int error = 0; @@ -523,7 +575,7 @@ xfs_rmap_recover_work( * just toss the RUI. */ for (i = 0; i < ruip->rui_format.rui_nextents; i++) { - if (!xfs_rui_validate_map(mp, + if (!xfs_rui_validate_map(mp, isrt, &ruip->rui_format.rui_extents[i])) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, &ruip->rui_format, @@ -531,7 +583,8 @@ xfs_rmap_recover_work( return -EFSCORRUPTED; } - xfs_rui_recover_work(mp, dfp, &ruip->rui_format.rui_extents[i]); + xfs_rui_recover_work(mp, dfp, isrt, + &ruip->rui_format.rui_extents[i]); } resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate); @@ -566,10 +619,13 @@ xfs_rmap_relog_intent( struct xfs_map_extent *map; unsigned int count; + ASSERT(intent->li_type == XFS_LI_RUI || + intent->li_type == XFS_LI_RUI_RT); + count = RUI_ITEM(intent)->rui_format.rui_nextents; map = RUI_ITEM(intent)->rui_format.rui_extents; - ruip = xfs_rui_init(tp->t_mountp, count); + ruip = xfs_rui_init(tp->t_mountp, intent->li_type, count); memcpy(ruip->rui_format.rui_extents, map, count * sizeof(*map)); atomic_set(&ruip->rui_next_extent, count); @@ -589,6 +645,47 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = { .relog_intent = xfs_rmap_relog_intent, }; +#ifdef CONFIG_XFS_RT +static struct xfs_log_item * +xfs_rtrmap_update_create_intent( + struct xfs_trans *tp, + struct list_head *items, + unsigned int count, + bool sort) +{ + return __xfs_rmap_update_create_intent(tp, items, count, sort, + XFS_LI_RUI_RT); +} + +/* Clean up after calling xfs_rmap_finish_one. 
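Editorial summary of how the realtime rmap deferred work is wired up, derived from the hunks above and below; not part of the patch.
/*
 * Deferred rmap work now comes in two flavours:
 *
 *   data device:  XFS_LI_RUI    / XFS_LI_RUD     handled by xfs_rmap_update_defer_type
 *   rt device:    XFS_LI_RUI_RT / XFS_LI_RUD_RT  handled by xfs_rtrmap_update_defer_type
 *
 * xfs_rmap_defer_add() selects the defer type from ri->ri_realtime,
 * xfs_rud_type_from_rui() picks the matching RUD item type, and log
 * recovery (xlog_recover_rtrui_commit_pass2(), further below) routes
 * XFS_LI_RUI_RT intents through the rt defer type.
 */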
*/ +STATIC void +xfs_rtrmap_finish_one_cleanup( + struct xfs_trans *tp, + struct xfs_btree_cur *rcur, + int error) +{ + if (rcur) + xfs_btree_del_cursor(rcur, error); +} + +const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = { + .name = "rtrmap", + .max_items = XFS_RUI_MAX_FAST_EXTENTS, + .create_intent = xfs_rtrmap_update_create_intent, + .abort_intent = xfs_rmap_update_abort_intent, + .create_done = xfs_rmap_update_create_done, + .finish_item = xfs_rmap_update_finish_item, + .finish_cleanup = xfs_rtrmap_finish_one_cleanup, + .cancel_item = xfs_rmap_update_cancel_item, + .recover_work = xfs_rmap_recover_work, + .relog_intent = xfs_rmap_relog_intent, +}; +#else +const struct xfs_defer_op_type xfs_rtrmap_update_defer_type = { + .name = "rtrmap", +}; +#endif + STATIC bool xfs_rui_item_match( struct xfs_log_item *lip, @@ -654,7 +751,7 @@ xlog_recover_rui_commit_pass2( return -EFSCORRUPTED; } - ruip = xfs_rui_init(mp, rui_formatp->rui_nextents); + ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents); xfs_rui_copy_format(&ruip->rui_format, rui_formatp); atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); @@ -668,6 +765,61 @@ const struct xlog_recover_item_ops xlog_rui_item_ops = { .commit_pass2 = xlog_recover_rui_commit_pass2, }; +#ifdef CONFIG_XFS_RT +STATIC int +xlog_recover_rtrui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_mount *mp = log->l_mp; + struct xfs_rui_log_item *ruip; + struct xfs_rui_log_format *rui_formatp; + size_t len; + + rui_formatp = item->ri_buf[0].i_addr; + + if (item->ri_buf[0].i_len < xfs_rui_log_format_sizeof(0)) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + len = xfs_rui_log_format_sizeof(rui_formatp->rui_nextents); + if (item->ri_buf[0].i_len != len) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + ruip = xfs_rui_init(mp, ITEM_TYPE(item), rui_formatp->rui_nextents); + xfs_rui_copy_format(&ruip->rui_format, rui_formatp); + atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents); + + xlog_recover_intent_item(log, &ruip->rui_item, lsn, + &xfs_rtrmap_update_defer_type); + return 0; +} +#else +STATIC int +xlog_recover_rtrui_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, + item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + return -EFSCORRUPTED; +} +#endif + +const struct xlog_recover_item_ops xlog_rtrui_item_ops = { + .item_type = XFS_LI_RUI_RT, + .commit_pass2 = xlog_recover_rtrui_commit_pass2, +}; + /* * This routine is called when an RUD format structure is found in a committed * transaction in the log. 
Its purpose is to cancel the corresponding RUI if it @@ -699,3 +851,33 @@ const struct xlog_recover_item_ops xlog_rud_item_ops = { .item_type = XFS_LI_RUD, .commit_pass2 = xlog_recover_rud_commit_pass2, }; + +#ifdef CONFIG_XFS_RT +STATIC int +xlog_recover_rtrud_commit_pass2( + struct xlog *log, + struct list_head *buffer_list, + struct xlog_recover_item *item, + xfs_lsn_t lsn) +{ + struct xfs_rud_log_format *rud_formatp; + + rud_formatp = item->ri_buf[0].i_addr; + if (item->ri_buf[0].i_len != sizeof(struct xfs_rud_log_format)) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp, + rud_formatp, item->ri_buf[0].i_len); + return -EFSCORRUPTED; + } + + xlog_recover_release_intent(log, XFS_LI_RUI_RT, + rud_formatp->rud_rui_id); + return 0; +} +#else +# define xlog_recover_rtrud_commit_pass2 xlog_recover_rtrui_commit_pass2 +#endif + +const struct xlog_recover_item_ops xlog_rtrud_item_ops = { + .item_type = XFS_LI_RUD_RT, + .commit_pass2 = xlog_recover_rtrud_commit_pass2, +}; diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index fcfa6e0eb3ad..d8e6d073d64d 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -22,6 +22,7 @@ #include "xfs_rtalloc.h" #include "xfs_sb.h" #include "xfs_rtbitmap.h" +#include "xfs_rtrmap_btree.h" #include "xfs_quota.h" #include "xfs_log_priv.h" #include "xfs_health.h" @@ -30,6 +31,8 @@ #include "xfs_rtgroup.h" #include "xfs_error.h" #include "xfs_trace.h" +#include "xfs_rtrefcount_btree.h" +#include "xfs_reflink.h" /* * Return whether there are any free extents in the size range given @@ -592,7 +595,7 @@ xfs_rtalloc_sumlevel( * specified. If we don't get maxlen then use prod to trim * the length, if given. The lengths are all in rtextents. */ -STATIC int +static int xfs_rtallocate_extent_size( struct xfs_rtalloc_args *args, xfs_rtxlen_t minlen, /* minimum length to allocate */ @@ -845,6 +848,13 @@ xfs_growfs_rt_init_rtsb( mp->m_rtsb_bp = rtsb_bp; error = xfs_bwrite(rtsb_bp); xfs_buf_unlock(rtsb_bp); + if (error) + return error; + + /* Initialize the rtrmap to reflect the rtsb. */ + if (rtg_rmap(args->rtg) != NULL) + error = xfs_rtrmapbt_init_rtsb(nargs->mp, args->rtg, args->tp); + return error; } @@ -856,8 +866,8 @@ xfs_growfs_rt_bmblock( xfs_fileoff_t bmbno) { struct xfs_mount *mp = rtg_mount(rtg); - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; - struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); + struct xfs_inode *rsumip = rtg_summary(rtg); struct xfs_rtalloc_args args = { .mp = mp, .rtg = rtg, @@ -893,8 +903,9 @@ xfs_growfs_rt_bmblock( goto out_free; nargs.tp = args.tp; - xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP); - xfs_rtgroup_trans_join(args.tp, args.rtg, XFS_RTGLOCK_BITMAP); + xfs_rtgroup_lock(args.rtg, XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP); + xfs_rtgroup_trans_join(args.tp, args.rtg, + XFS_RTGLOCK_BITMAP | XFS_RTGLOCK_RMAP); /* * Update the bitmap inode's size ondisk and incore. We need to update @@ -980,9 +991,12 @@ xfs_growfs_rt_bmblock( goto out_free; /* - * Ensure the mount RT feature flag is now set. + * Ensure the mount RT feature flag is now set, and compute new + * maxlevels for rt btrees. 
*/ mp->m_features |= XFS_FEAT_REALTIME; + xfs_rtrmapbt_compute_maxlevels(mp); + xfs_rtrefcountbt_compute_maxlevels(mp); kfree(nmp); return 0; @@ -1041,8 +1055,8 @@ xfs_growfs_rt_alloc_blocks( xfs_extlen_t *nrbmblocks) { struct xfs_mount *mp = rtg_mount(rtg); - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; - struct xfs_inode *rsumip = rtg->rtg_inodes[XFS_RTGI_SUMMARY]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); + struct xfs_inode *rsumip = rtg_summary(rtg); xfs_extlen_t orbmblocks = 0; xfs_extlen_t orsumblocks = 0; struct xfs_mount *nmp; @@ -1150,29 +1164,38 @@ out_rele: return error; } -static int +int xfs_growfs_check_rtgeom( const struct xfs_mount *mp, + xfs_rfsblock_t dblocks, xfs_rfsblock_t rblocks, xfs_extlen_t rextsize) { + xfs_extlen_t min_logfsbs; struct xfs_mount *nmp; - int error = 0; nmp = xfs_growfs_rt_alloc_fake_mount(mp, rblocks, rextsize); if (!nmp) return -ENOMEM; + nmp->m_sb.sb_dblocks = dblocks; + + xfs_rtrmapbt_compute_maxlevels(nmp); + xfs_rtrefcountbt_compute_maxlevels(nmp); + xfs_trans_resv_calc(nmp, M_RES(nmp)); /* * New summary size can't be more than half the size of the log. This * prevents us from getting a log overflow, since we'll log basically * the whole summary file at once. */ - if (nmp->m_rsumblocks > (mp->m_sb.sb_logblocks >> 1)) - error = -EINVAL; + min_logfsbs = min_t(xfs_extlen_t, xfs_log_calc_minimum_size(nmp), + nmp->m_rsumblocks * 2); kfree(nmp); - return error; + + if (min_logfsbs > mp->m_sb.sb_logblocks) + return -EINVAL; + return 0; } /* @@ -1263,11 +1286,17 @@ xfs_growfs_rt( XFS_FSB_TO_B(mp, in->extsize) < XFS_MIN_RTEXTSIZE) goto out_unlock; - /* Unsupported realtime features. */ + /* Check for features supported only on rtgroups filesystems. */ error = -EOPNOTSUPP; - if (xfs_has_quota(mp) && !xfs_has_rtgroups(mp)) - goto out_unlock; - if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp)) + if (!xfs_has_rtgroups(mp)) { + if (xfs_has_rmapbt(mp)) + goto out_unlock; + if (xfs_has_quota(mp)) + goto out_unlock; + if (xfs_has_reflink(mp)) + goto out_unlock; + } else if (xfs_has_reflink(mp) && + !xfs_reflink_supports_rextsize(mp, in->extsize)) goto out_unlock; error = xfs_sb_validate_fsb_count(&mp->m_sb, in->newblocks); @@ -1291,7 +1320,8 @@ xfs_growfs_rt( goto out_unlock; /* Make sure the new fs size won't cause problems with the log. */ - error = xfs_growfs_check_rtgeom(mp, in->newblocks, in->extsize); + error = xfs_growfs_check_rtgeom(mp, mp->m_sb.sb_dblocks, in->newblocks, + in->extsize); if (error) goto out_unlock; @@ -1344,6 +1374,12 @@ xfs_growfs_rt( if (!error) error = error2; + + /* Reset the rt metadata btree space reservations. */ + xfs_rt_resv_free(mp); + error2 = xfs_rt_resv_init(mp); + if (error2 && error2 != -ENOSPC) + error = error2; } out_unlock: @@ -1487,6 +1523,46 @@ xfs_rtalloc_reinit_frextents( return 0; } +/* Free space reservations for rt metadata inodes. */ +void +xfs_rt_resv_free( + struct xfs_mount *mp) +{ + struct xfs_rtgroup *rtg = NULL; + unsigned int i; + + while ((rtg = xfs_rtgroup_next(mp, rtg))) { + for (i = 0; i < XFS_RTGI_MAX; i++) + xfs_metafile_resv_free(rtg->rtg_inodes[i]); + } +} + +/* Reserve space for rt metadata inodes' space expansion. 
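A worked example of the reworked log-size check in xfs_growfs_check_rtgeom() above; the numbers are invented purely for illustration.
/*
 * Suppose the grown geometry yields xfs_log_calc_minimum_size(nmp) = 2560
 * blocks and a summary file of m_rsumblocks = 1000 blocks:
 *
 *   min_logfsbs = min(2560, 2 * 1000) = 2000
 *
 * A 1024-block log is rejected with -EINVAL (2000 > 1024); a 4096-block
 * log passes.  This keeps the old "summary must fit in half the log"
 * rule, but never demands more than the computed minimum log size.
 */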
*/ +int +xfs_rt_resv_init( + struct xfs_mount *mp) +{ + struct xfs_rtgroup *rtg = NULL; + xfs_filblks_t ask; + int error = 0; + + while ((rtg = xfs_rtgroup_next(mp, rtg))) { + int err2; + + ask = xfs_rtrmapbt_calc_reserves(mp); + err2 = xfs_metafile_resv_init(rtg_rmap(rtg), ask); + if (err2 && !error) + error = err2; + + ask = xfs_rtrefcountbt_calc_reserves(mp); + err2 = xfs_metafile_resv_init(rtg_refcount(rtg), ask); + if (err2 && !error) + error = err2; + } + + return error; +} + /* * Read in the bmbt of an rt metadata inode so that we never have to load them * at runtime. This enables the use of shared ILOCKs for rtbitmap scans. Use @@ -1601,7 +1677,7 @@ xfs_rtpick_extent( xfs_rtxlen_t len) /* allocation length (rtextents) */ { struct xfs_mount *mp = rtg_mount(rtg); - struct xfs_inode *rbmip = rtg->rtg_inodes[XFS_RTGI_BITMAP]; + struct xfs_inode *rbmip = rtg_bitmap(rtg); xfs_rtxnum_t b = 0; /* result rtext */ int log2; /* log of sequence number */ uint64_t resid; /* residual after log removed */ @@ -1885,7 +1961,7 @@ out_unlock: goto out_release; } -static int +int xfs_rtallocate_rtgs( struct xfs_trans *tp, xfs_fsblock_t bno_hint, @@ -1950,7 +2026,10 @@ xfs_rtallocate_align( if (*noalign) { align = mp->m_sb.sb_rextsize; } else { - align = xfs_get_extsz_hint(ap->ip); + if (ap->flags & XFS_BMAPI_COWFORK) + align = xfs_get_cowextsz_hint(ap->ip); + else + align = xfs_get_extsz_hint(ap->ip); if (!align) align = 1; if (align == mp->m_sb.sb_rextsize) diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h index 8e2a07b8174b..0d95b29092c9 100644 --- a/fs/xfs/xfs_rtalloc.h +++ b/fs/xfs/xfs_rtalloc.h @@ -34,6 +34,9 @@ int /* error */ xfs_rtmount_inodes( struct xfs_mount *mp); /* file system mount structure */ +void xfs_rt_resv_free(struct xfs_mount *mp); +int xfs_rt_resv_init(struct xfs_mount *mp); + /* * Grow the realtime area of the filesystem. */ @@ -43,6 +46,8 @@ xfs_growfs_rt( xfs_growfs_rt_t *in); /* user supplied growfs struct */ int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp); +int xfs_growfs_check_rtgeom(const struct xfs_mount *mp, xfs_rfsblock_t dblocks, + xfs_rfsblock_t rblocks, xfs_agblock_t rextsize); #else # define xfs_growfs_rt(mp,in) (-ENOSYS) # define xfs_rtalloc_reinit_frextents(m) (0) @@ -60,6 +65,21 @@ xfs_rtmount_init( } # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 
0 : (-ENOSYS)) # define xfs_rtunmount_inodes(m) +# define xfs_rt_resv_free(mp) ((void)0) +# define xfs_rt_resv_init(mp) (0) + +static inline int +xfs_growfs_check_rtgeom(const struct xfs_mount *mp, + xfs_rfsblock_t dblocks, xfs_rfsblock_t rblocks, + xfs_extlen_t rextsize) +{ + return 0; +} #endif /* CONFIG_XFS_RT */ +int xfs_rtallocate_rtgs(struct xfs_trans *tp, xfs_fsblock_t bno_hint, + xfs_rtxlen_t minlen, xfs_rtxlen_t maxlen, xfs_rtxlen_t prod, + bool wasdel, bool initial_user_data, xfs_rtblock_t *bno, + xfs_extlen_t *blen); + #endif /* __XFS_RTALLOC_H__ */ diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c index ffb52725c2a8..35c7fb3ba324 100644 --- a/fs/xfs/xfs_stats.c +++ b/fs/xfs/xfs_stats.c @@ -52,7 +52,10 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf) { "rmapbt", xfsstats_offset(xs_refcbt_2) }, { "refcntbt", xfsstats_offset(xs_rmap_mem_2) }, { "rmapbt_mem", xfsstats_offset(xs_rcbag_2) }, - { "rcbagbt", xfsstats_offset(xs_qm_dqreclaims)}, + { "rcbagbt", xfsstats_offset(xs_rtrmap_2) }, + { "rtrmapbt", xfsstats_offset(xs_rtrmap_mem_2)}, + { "rtrmapbt_mem", xfsstats_offset(xs_rtrefcbt_2) }, + { "rtrefcntbt", xfsstats_offset(xs_qm_dqreclaims)}, /* we print both series of quota information together */ { "qm", xfsstats_offset(xs_xstrat_bytes)}, }; diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h index a61fb56ed2e6..15ba1abcf253 100644 --- a/fs/xfs/xfs_stats.h +++ b/fs/xfs/xfs_stats.h @@ -127,6 +127,9 @@ struct __xfsstats { uint32_t xs_refcbt_2[__XBTS_MAX]; uint32_t xs_rmap_mem_2[__XBTS_MAX]; uint32_t xs_rcbag_2[__XBTS_MAX]; + uint32_t xs_rtrmap_2[__XBTS_MAX]; + uint32_t xs_rtrmap_mem_2[__XBTS_MAX]; + uint32_t xs_rtrefcbt_2[__XBTS_MAX]; uint32_t xs_qm_dqreclaims; uint32_t xs_qm_dqreclaim_misses; uint32_t xs_qm_dquot_dups; diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 394fdf3bb535..d92d7a07ea89 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -819,20 +819,76 @@ xfs_fs_sync_fs( return 0; } +static xfs_extlen_t +xfs_internal_log_size( + struct xfs_mount *mp) +{ + if (!mp->m_sb.sb_logstart) + return 0; + return mp->m_sb.sb_logblocks; +} + +static void +xfs_statfs_data( + struct xfs_mount *mp, + struct kstatfs *st) +{ + int64_t fdblocks = + percpu_counter_sum(&mp->m_fdblocks); + + /* make sure st->f_bfree does not underflow */ + st->f_bfree = max(0LL, fdblocks - xfs_fdblocks_unavailable(mp)); + /* + * sb_dblocks can change during growfs, but nothing cares about reporting + * the old or new value during growfs. + */ + st->f_blocks = mp->m_sb.sb_dblocks - xfs_internal_log_size(mp); +} + +/* + * When stat(v)fs is called on a file with the realtime bit set or a directory + * with the rtinherit bit, report freespace information for the RT device + * instead of the main data device. 
+ */ +static void +xfs_statfs_rt( + struct xfs_mount *mp, + struct kstatfs *st) +{ + st->f_bfree = xfs_rtbxlen_to_blen(mp, + percpu_counter_sum_positive(&mp->m_frextents)); + st->f_blocks = mp->m_sb.sb_rblocks; +} + +static void +xfs_statfs_inodes( + struct xfs_mount *mp, + struct kstatfs *st) +{ + uint64_t icount = percpu_counter_sum(&mp->m_icount); + uint64_t ifree = percpu_counter_sum(&mp->m_ifree); + uint64_t fakeinos = XFS_FSB_TO_INO(mp, st->f_bfree); + + st->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); + if (M_IGEO(mp)->maxicount) + st->f_files = min_t(typeof(st->f_files), st->f_files, + M_IGEO(mp)->maxicount); + + /* If sb_icount overshot maxicount, report actual allocation */ + st->f_files = max_t(typeof(st->f_files), st->f_files, + mp->m_sb.sb_icount); + + /* Make sure st->f_ffree does not underflow */ + st->f_ffree = max_t(int64_t, 0, st->f_files - (icount - ifree)); +} + STATIC int xfs_fs_statfs( struct dentry *dentry, - struct kstatfs *statp) + struct kstatfs *st) { struct xfs_mount *mp = XFS_M(dentry->d_sb); - xfs_sb_t *sbp = &mp->m_sb; struct xfs_inode *ip = XFS_I(d_inode(dentry)); - uint64_t fakeinos, id; - uint64_t icount; - uint64_t ifree; - uint64_t fdblocks; - xfs_extlen_t lsize; - int64_t ffree; /* * Expedite background inodegc but don't wait. We do not want to block @@ -840,58 +896,28 @@ xfs_fs_statfs( */ xfs_inodegc_push(mp); - statp->f_type = XFS_SUPER_MAGIC; - statp->f_namelen = MAXNAMELEN - 1; - - id = huge_encode_dev(mp->m_ddev_targp->bt_dev); - statp->f_fsid = u64_to_fsid(id); - - icount = percpu_counter_sum(&mp->m_icount); - ifree = percpu_counter_sum(&mp->m_ifree); - fdblocks = percpu_counter_sum(&mp->m_fdblocks); - - spin_lock(&mp->m_sb_lock); - statp->f_bsize = sbp->sb_blocksize; - lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; - statp->f_blocks = sbp->sb_dblocks - lsize; - spin_unlock(&mp->m_sb_lock); - - /* make sure statp->f_bfree does not underflow */ - statp->f_bfree = max_t(int64_t, 0, - fdblocks - xfs_fdblocks_unavailable(mp)); - statp->f_bavail = statp->f_bfree; - - fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); - statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); - if (M_IGEO(mp)->maxicount) - statp->f_files = min_t(typeof(statp->f_files), - statp->f_files, - M_IGEO(mp)->maxicount); - - /* If sb_icount overshot maxicount, report actual allocation */ - statp->f_files = max_t(typeof(statp->f_files), - statp->f_files, - sbp->sb_icount); + st->f_type = XFS_SUPER_MAGIC; + st->f_namelen = MAXNAMELEN - 1; + st->f_bsize = mp->m_sb.sb_blocksize; + st->f_fsid = u64_to_fsid(huge_encode_dev(mp->m_ddev_targp->bt_dev)); - /* make sure statp->f_ffree does not underflow */ - ffree = statp->f_files - (icount - ifree); - statp->f_ffree = max_t(int64_t, ffree, 0); + xfs_statfs_data(mp, st); + xfs_statfs_inodes(mp, st); if (XFS_IS_REALTIME_MOUNT(mp) && - (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { - s64 freertx; - - statp->f_blocks = sbp->sb_rblocks; - freertx = percpu_counter_sum_positive(&mp->m_frextents); - statp->f_bavail = statp->f_bfree = - xfs_rtbxlen_to_blen(mp, freertx); - } + (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) + xfs_statfs_rt(mp, st); if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) - xfs_qm_statvfs(ip, statp); + xfs_qm_statvfs(ip, st); + /* + * XFS does not distinguish between blocks available to privileged and + * unprivileged users. 
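A worked example of the clamping in xfs_statfs_inodes() above; all values are invented for illustration.
/*
 * Say icount = 1000 allocated inodes, ifree = 100 of them free, f_bfree
 * has room for fakeinos = 5000 more inodes, and maxicount = 4000:
 *
 *   f_files = min(1000 + 5000, XFS_MAXINUMBER)  = 6000
 *   f_files = min(6000, maxicount)              = 4000
 *   f_files = max(4000, sb_icount = 1000)       = 4000
 *   f_ffree = max(0, 4000 - (1000 - 100))       = 3100
 */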
+ */ + st->f_bavail = st->f_bfree; return 0; } @@ -1730,7 +1756,7 @@ xfs_fs_fill_super( sb->s_time_max = XFS_LEGACY_TIME_MAX; } trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); - sb->s_iflags |= SB_I_CGROUPWB; + sb->s_iflags |= SB_I_CGROUPWB | SB_I_ALLOW_HSM; set_posix_acl_flag(sb); @@ -1754,9 +1780,11 @@ xfs_fs_fill_super( xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR); if (xfs_has_reflink(mp)) { - if (mp->m_sb.sb_rblocks) { + if (xfs_has_realtime(mp) && + !xfs_reflink_supports_rextsize(mp, mp->m_sb.sb_rextsize)) { xfs_alert(mp, - "reflink not compatible with realtime device!"); + "reflink not compatible with realtime extent size %u!", + mp->m_sb.sb_rextsize); error = -EINVAL; goto out_filestream_unmount; } @@ -1767,12 +1795,6 @@ xfs_fs_fill_super( } } - if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) { - xfs_alert(mp, - "reverse mapping btree not compatible with realtime device!"); - error = -EINVAL; - goto out_filestream_unmount; - } if (xfs_has_exchange_range(mp)) xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE); diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h index 302e6e5d6c7e..c0e85c1e42f2 100644 --- a/fs/xfs/xfs_super.h +++ b/fs/xfs/xfs_super.h @@ -92,7 +92,6 @@ extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *, extern const struct export_operations xfs_export_operations; extern const struct quotactl_ops xfs_quotactl_operations; -extern const struct dax_holder_operations xfs_dax_holder_operations; extern void xfs_reinit_percpu_counters(struct xfs_mount *mp); diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c index c84df23b494d..751dc74a3067 100644 --- a/fs/xfs/xfs_sysctl.c +++ b/fs/xfs/xfs_sysctl.c @@ -66,7 +66,7 @@ xfs_deprecated_dointvec_minmax( return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); } -static struct ctl_table xfs_table[] = { +static const struct ctl_table xfs_table[] = { { .procname = "irix_sgid_inherit", .data = &xfs_params.sgid_inherit.val, diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 7b16cdd72e9d..b29462363b81 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -14,11 +14,15 @@ * ino: filesystem inode number * * agbno: per-AG block number in fs blocks + * rgbno: per-rtgroup block number in fs blocks * startblock: physical block number for file mappings. This is either a * segmented fsblock for data device mappings, or a rfsblock * for realtime device mappings * fsbcount: number of blocks in an extent, in fs blocks * + * gbno: generic allocation group block number. This is an agbno for + * space in a per-AG or a rgbno for space in a realtime group. 
+ * * daddr: physical block number in 512b blocks * bbcount: number of blocks in a physical extent, in 512b blocks * @@ -494,7 +498,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class, __entry->dev = bp->b_target->bt_dev; __entry->bno = xfs_buf_daddr(bp); __entry->nblks = bp->b_length; - __entry->hold = atomic_read(&bp->b_hold); + __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->flags = bp->b_flags; @@ -565,7 +569,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class, __entry->bno = xfs_buf_daddr(bp); __entry->length = bp->b_length; __entry->flags = flags; - __entry->hold = atomic_read(&bp->b_hold); + __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->caller_ip = caller_ip; @@ -608,7 +612,7 @@ TRACE_EVENT(xfs_buf_ioerror, __entry->dev = bp->b_target->bt_dev; __entry->bno = xfs_buf_daddr(bp); __entry->length = bp->b_length; - __entry->hold = atomic_read(&bp->b_hold); + __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->error = error; @@ -652,7 +656,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class, __entry->buf_bno = xfs_buf_daddr(bip->bli_buf); __entry->buf_len = bip->bli_buf->b_length; __entry->buf_flags = bip->bli_buf->b_flags; - __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); + __entry->buf_hold = bip->bli_buf->b_hold; __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); __entry->buf_lockval = bip->bli_buf->b_sema.count; __entry->li_flags = bip->bli_item.li_flags; @@ -2295,6 +2299,7 @@ TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID); +TRACE_DEFINE_ENUM(XFS_DINODE_FMT_META_BTREE); DECLARE_EVENT_CLASS(xfs_swap_extent_class, TP_PROTO(struct xfs_inode *ip, int which), @@ -2918,13 +2923,14 @@ DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_finish_item); /* rmap tracepoints */ DECLARE_EVENT_CLASS(xfs_rmap_class, TP_PROTO(struct xfs_btree_cur *cur, - xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, + xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten, const struct xfs_owner_info *oinfo), - TP_ARGS(cur, agbno, len, unwritten, oinfo), + TP_ARGS(cur, gbno, len, unwritten, oinfo), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) __field(uint64_t, owner) __field(uint64_t, offset) @@ -2932,8 +2938,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; - __entry->agbno = agbno; + __entry->gbno = gbno; __entry->len = len; __entry->owner = oinfo->oi_owner; __entry->offset = oinfo->oi_offset; @@ -2941,10 +2948,11 @@ DECLARE_EVENT_CLASS(xfs_rmap_class, if (unwritten) __entry->flags |= XFS_RMAP_UNWRITTEN; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx", + TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, __entry->len, __entry->owner, __entry->offset, @@ -2953,9 +2961,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_class, #define DEFINE_RMAP_EVENT(name) \ 
DEFINE_EVENT(xfs_rmap_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, \ - xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \ + xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten, \ const struct xfs_owner_info *oinfo), \ - TP_ARGS(cur, agbno, len, unwritten, oinfo)) + TP_ARGS(cur, gbno, len, unwritten, oinfo)) /* btree cursor error/%ip tracepoint class */ DECLARE_EVENT_CLASS(xfs_btree_error_class, @@ -3018,47 +3026,36 @@ TRACE_EVENT(xfs_rmap_convert_state, TP_ARGS(cur, state, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_ino_t, ino) __field(int, state) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; - switch (cur->bc_ops->type) { - case XFS_BTREE_TYPE_INODE: - __entry->agno = 0; - __entry->ino = cur->bc_ino.ip->i_ino; - break; - case XFS_BTREE_TYPE_AG: - __entry->agno = cur->bc_group->xg_gno; - __entry->ino = 0; - break; - case XFS_BTREE_TYPE_MEM: - __entry->agno = 0; - __entry->ino = 0; - break; - } + __entry->type = cur->bc_group->xg_type; + __entry->agno = cur->bc_group->xg_gno; __entry->state = state; __entry->caller_ip = caller_ip; ), - TP_printk("dev %d:%d agno 0x%x ino 0x%llx state %d caller %pS", + TP_printk("dev %d:%d %sno 0x%x state %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->ino, __entry->state, (char *)__entry->caller_ip) ); DECLARE_EVENT_CLASS(xfs_rmapbt_class, TP_PROTO(struct xfs_btree_cur *cur, - xfs_agblock_t agbno, xfs_extlen_t len, + xfs_agblock_t gbno, xfs_extlen_t len, uint64_t owner, uint64_t offset, unsigned int flags), - TP_ARGS(cur, agbno, len, owner, offset, flags), + TP_ARGS(cur, gbno, len, owner, offset, flags), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) __field(uint64_t, owner) __field(uint64_t, offset) @@ -3066,17 +3063,19 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; - __entry->agbno = agbno; + __entry->gbno = gbno; __entry->len = len; __entry->owner = owner; __entry->offset = offset; __entry->flags = flags; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", + TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, __entry->len, __entry->owner, __entry->offset, @@ -3085,9 +3084,9 @@ DECLARE_EVENT_CLASS(xfs_rmapbt_class, #define DEFINE_RMAPBT_EVENT(name) \ DEFINE_EVENT(xfs_rmapbt_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, \ - xfs_agblock_t agbno, xfs_extlen_t len, \ + xfs_agblock_t gbno, xfs_extlen_t len, \ uint64_t owner, uint64_t offset, unsigned int flags), \ - TP_ARGS(cur, agbno, len, owner, offset, flags)) + TP_ARGS(cur, gbno, len, owner, offset, flags)) TRACE_DEFINE_ENUM(XFS_RMAP_MAP); TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED); @@ -3104,8 +3103,9 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class, TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, owner) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(int, 
whichfork) __field(xfs_fileoff_t, l_loff) __field(xfs_filblks_t, l_len) @@ -3114,9 +3114,11 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class, ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; - __entry->agno = XFS_FSB_TO_AGNO(mp, ri->ri_bmap.br_startblock); - __entry->agbno = XFS_FSB_TO_AGBNO(mp, - ri->ri_bmap.br_startblock); + __entry->type = ri->ri_group->xg_type; + __entry->agno = ri->ri_group->xg_gno; + __entry->gbno = xfs_fsb_to_gbno(mp, + ri->ri_bmap.br_startblock, + ri->ri_group->xg_type); __entry->owner = ri->ri_owner; __entry->whichfork = ri->ri_whichfork; __entry->l_loff = ri->ri_bmap.br_startoff; @@ -3124,11 +3126,12 @@ DECLARE_EVENT_CLASS(xfs_rmap_deferred_class, __entry->l_state = ri->ri_bmap.br_state; __entry->op = ri->ri_type; ), - TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d", + TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->op, XFS_RMAP_INTENT_STRINGS), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, __entry->owner, __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS), __entry->l_loff, @@ -3302,56 +3305,62 @@ TRACE_EVENT(xfs_ag_resv_init_error, /* refcount tracepoint classes */ DECLARE_EVENT_CLASS(xfs_refcount_class, - TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, + TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_extlen_t len), - TP_ARGS(cur, agbno, len), + TP_ARGS(cur, gbno, len), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; - __entry->agbno = agbno; + __entry->gbno = gbno; __entry->len = len; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x", + TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, __entry->len) ); #define DEFINE_REFCOUNT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_class, name, \ - TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, \ + TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, \ xfs_extlen_t len), \ - TP_ARGS(cur, agbno, len)) + TP_ARGS(cur, gbno, len)) TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi); TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi); TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi); TRACE_EVENT(xfs_refcount_lookup, - TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t agbno, + TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_lookup_t dir), - TP_ARGS(cur, agbno, dir), + TP_ARGS(cur, gbno, dir), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(xfs_lookup_t, dir) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; - __entry->agbno = agbno; + __entry->gbno = gbno; __entry->dir = dir; ), - TP_printk("dev %d:%d agno 0x%x agbno 0x%x cmp %s(%d)", + TP_printk("dev %d:%d %sno 0x%x gbno 0x%x cmp %s(%d)", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, 
__print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR), __entry->dir) ) @@ -3362,6 +3371,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class, TP_ARGS(cur, irec), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, domain) __field(xfs_agblock_t, startblock) @@ -3370,14 +3380,16 @@ DECLARE_EVENT_CLASS(xfs_refcount_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; __entry->refcount = irec->rc_refcount; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u", + TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), __entry->startblock, @@ -3393,49 +3405,53 @@ DEFINE_EVENT(xfs_refcount_extent_class, name, \ /* single-rcext and an agbno tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, - xfs_agblock_t agbno), - TP_ARGS(cur, irec, agbno), + xfs_agblock_t gbno), + TP_ARGS(cur, irec, gbno), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, domain) __field(xfs_agblock_t, startblock) __field(xfs_extlen_t, blockcount) __field(xfs_nlink_t, refcount) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; __entry->refcount = irec->rc_refcount; - __entry->agbno = agbno; + __entry->gbno = gbno; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x", + TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), __entry->startblock, __entry->blockcount, __entry->refcount, - __entry->agbno) + __entry->gbno) ) #define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_extent_at_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \ - xfs_agblock_t agbno), \ - TP_ARGS(cur, irec, agbno)) + xfs_agblock_t gbno), \ + TP_ARGS(cur, irec, gbno)) /* double-rcext tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, - struct xfs_refcount_irec *i2), + struct xfs_refcount_irec *i2), TP_ARGS(cur, i1, i2), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) @@ -3448,6 +3464,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; @@ -3458,9 +3475,10 @@ 
DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, __entry->i2_blockcount = i2->rc_blockcount; __entry->i2_refcount = i2->rc_refcount; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " - "dom %s agbno 0x%x fsbcount 0x%x refcount %u", + TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " + "dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, @@ -3481,10 +3499,11 @@ DEFINE_EVENT(xfs_refcount_double_extent_class, name, \ /* double-rcext and an agbno tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, - struct xfs_refcount_irec *i2, xfs_agblock_t agbno), - TP_ARGS(cur, i1, i2, agbno), + struct xfs_refcount_irec *i2, xfs_agblock_t gbno), + TP_ARGS(cur, i1, i2, gbno), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) @@ -3494,10 +3513,11 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, __field(xfs_agblock_t, i2_startblock) __field(xfs_extlen_t, i2_blockcount) __field(xfs_nlink_t, i2_refcount) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; @@ -3507,11 +3527,12 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, __entry->i2_startblock = i2->rc_startblock; __entry->i2_blockcount = i2->rc_blockcount; __entry->i2_refcount = i2->rc_refcount; - __entry->agbno = agbno; + __entry->gbno = gbno; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " - "dom %s agbno 0x%x fsbcount 0x%x refcount %u @ agbno 0x%x", + TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " + "dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, @@ -3521,14 +3542,14 @@ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, __entry->i2_startblock, __entry->i2_blockcount, __entry->i2_refcount, - __entry->agbno) + __entry->gbno) ) #define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \ - struct xfs_refcount_irec *i2, xfs_agblock_t agbno), \ - TP_ARGS(cur, i1, i2, agbno)) + struct xfs_refcount_irec *i2, xfs_agblock_t gbno), \ + TP_ARGS(cur, i1, i2, gbno)) /* triple-rcext tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, @@ -3537,6 +3558,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, TP_ARGS(cur, i1, i2, i3), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) @@ -3553,6 +3575,7 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; + __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; 
__entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; @@ -3567,10 +3590,11 @@ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, __entry->i3_blockcount = i3->rc_blockcount; __entry->i3_refcount = i3->rc_refcount; ), - TP_printk("dev %d:%d agno 0x%x dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " - "dom %s agbno 0x%x fsbcount 0x%x refcount %u -- " - "dom %s agbno 0x%x fsbcount 0x%x refcount %u", + TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " + "dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " + "dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, @@ -3638,23 +3662,27 @@ DECLARE_EVENT_CLASS(xfs_refcount_deferred_class, TP_ARGS(mp, refc), TP_STRUCT__entry( __field(dev_t, dev) + __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(int, op) - __field(xfs_agblock_t, agbno) + __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; - __entry->agno = XFS_FSB_TO_AGNO(mp, refc->ri_startblock); + __entry->type = refc->ri_group->xg_type; + __entry->agno = refc->ri_group->xg_gno; __entry->op = refc->ri_type; - __entry->agbno = XFS_FSB_TO_AGBNO(mp, refc->ri_startblock); + __entry->gbno = xfs_fsb_to_gbno(mp, refc->ri_startblock, + refc->ri_group->xg_type); __entry->len = refc->ri_blockcount; ), - TP_printk("dev %d:%d op %s agno 0x%x agbno 0x%x fsbcount 0x%x", + TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->op, XFS_REFCOUNT_INTENT_STRINGS), + __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, - __entry->agbno, + __entry->gbno, __entry->len) ); #define DEFINE_REFCOUNT_DEFERRED_EVENT(name) \ @@ -3993,7 +4021,7 @@ TRACE_EVENT(xfs_fsmap_mapping, __entry->offset = frec->offset; __entry->flags = frec->rm_flags; ), - TP_printk("dev %d:%d keydev %d:%d agno 0x%x rmapbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x", + TP_printk("dev %d:%d keydev %d:%d agno 0x%x gbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->keydev), MINOR(__entry->keydev), __entry->agno, @@ -4950,7 +4978,7 @@ DECLARE_EVENT_CLASS(xfbtree_buf_class, __entry->xfino = file_inode(xfbt->target->bt_file)->i_ino; __entry->bno = xfs_buf_daddr(bp); __entry->nblks = bp->b_length; - __entry->hold = atomic_read(&bp->b_hold); + __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->flags = bp->b_flags; @@ -5574,6 +5602,72 @@ DEFINE_EVENT(xfs_metadir_class, name, \ TP_ARGS(dp, name, ino)) DEFINE_METADIR_EVENT(xfs_metadir_lookup); +/* metadata inode space reservations */ + +DECLARE_EVENT_CLASS(xfs_metafile_resv_class, + TP_PROTO(struct xfs_inode *ip, xfs_filblks_t len), + TP_ARGS(ip, len), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_ino_t, ino) + __field(unsigned long long, freeblks) + __field(unsigned long long, reserved) + __field(unsigned long long, asked) + __field(unsigned long long, used) + __field(unsigned long long, len) + ), + TP_fast_assign( + struct xfs_mount *mp = ip->i_mount; + + __entry->dev = mp->m_super->s_dev; + __entry->ino = ip->i_ino; + __entry->freeblks = percpu_counter_sum(&mp->m_fdblocks); + __entry->reserved = 
ip->i_delayed_blks; + __entry->asked = ip->i_meta_resv_asked; + __entry->used = ip->i_nblocks; + __entry->len = len; + ), + TP_printk("dev %d:%d ino 0x%llx freeblks %llu resv %llu ask %llu used %llu len %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->ino, + __entry->freeblks, + __entry->reserved, + __entry->asked, + __entry->used, + __entry->len) +) +#define DEFINE_METAFILE_RESV_EVENT(name) \ +DEFINE_EVENT(xfs_metafile_resv_class, name, \ + TP_PROTO(struct xfs_inode *ip, xfs_filblks_t len), \ + TP_ARGS(ip, len)) +DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_init); +DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free); +DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_alloc_space); +DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free_space); +DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_critical); +DEFINE_INODE_ERROR_EVENT(xfs_metafile_resv_init_error); + +#ifdef CONFIG_XFS_RT +TRACE_EVENT(xfs_growfs_check_rtgeom, + TP_PROTO(const struct xfs_mount *mp, unsigned int min_logfsbs), + TP_ARGS(mp, min_logfsbs), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned int, logblocks) + __field(unsigned int, min_logfsbs) + ), + TP_fast_assign( + __entry->dev = mp->m_super->s_dev; + __entry->logblocks = mp->m_sb.sb_logblocks; + __entry->min_logfsbs = min_logfsbs; + ), + TP_printk("dev %d:%d logblocks %u min_logfsbs %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->logblocks, + __entry->min_logfsbs) +); +#endif /* CONFIG_XFS_RT */ + #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 4cd25717c9d1..c6657072361a 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -100,7 +100,6 @@ xfs_trans_dup( /* * Initialize the new transaction structure. */ - ntp->t_magic = XFS_TRANS_HEADER_MAGIC; ntp->t_mountp = tp->t_mountp; INIT_LIST_HEAD(&ntp->t_items); INIT_LIST_HEAD(&ntp->t_busy); @@ -275,7 +274,6 @@ retry: ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || xfs_has_lazysbcount(mp)); - tp->t_magic = XFS_TRANS_HEADER_MAGIC; tp->t_flags = flags; tp->t_mountp = mp; INIT_LIST_HEAD(&tp->t_items); @@ -1266,6 +1264,9 @@ retry: xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + if (xfs_is_metadir_inode(ip)) + goto out; + error = xfs_qm_dqattach_locked(ip, false); if (error) { /* Caller should have allocated the dquots! */ @@ -1334,6 +1335,7 @@ retry: goto out_cancel; } +out: *tpp = tp; return 0; diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 71c2e82e4dad..2b366851e9a4 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -122,7 +122,6 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item, * This is the structure maintained for every active transaction. 
*/ typedef struct xfs_trans { - unsigned int t_magic; /* magic number */ unsigned int t_log_res; /* amt of log space resvd */ unsigned int t_log_count; /* count for perm log res */ unsigned int t_blk_res; /* # of blocks resvd */ diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index f56d62dced97..0fcb1828e598 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -359,13 +359,8 @@ xfsaild_resubmit_item( } /* protected by ail_lock */ - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { - if (bp->b_flags & (_XBF_INODES | _XBF_DQUOTS)) - clear_bit(XFS_LI_FAILED, &lip->li_flags); - else - xfs_clear_li_failed(lip); - } - + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) + clear_bit(XFS_LI_FAILED, &lip->li_flags); xfs_buf_unlock(bp); return XFS_ITEM_SUCCESS; } diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 8e886ecfd69a..53af546c0b23 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -659,7 +659,7 @@ xfs_trans_inode_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_BUF; - bp->b_flags |= _XBF_INODES; + bp->b_iodone = xfs_buf_inode_iodone; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -684,7 +684,7 @@ xfs_trans_stale_inode_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_STALE_INODE; - bp->b_flags |= _XBF_INODES; + bp->b_iodone = xfs_buf_inode_iodone; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -709,7 +709,7 @@ xfs_trans_inode_alloc_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; - bp->b_flags |= _XBF_INODES; + bp->b_iodone = xfs_buf_inode_iodone; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -820,6 +820,6 @@ xfs_trans_dquot_buf( break; } - bp->b_flags |= _XBF_DQUOTS; + bp->b_iodone = xfs_buf_dquot_iodone; xfs_trans_buf_set_type(tp, bp, type); } diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index 713b6d243e56..765456bf3428 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -156,7 +156,8 @@ xfs_trans_mod_ino_dquot( unsigned int field, int64_t delta) { - ASSERT(!xfs_is_metadir_inode(ip) || XFS_IS_DQDETACHED(ip)); + if (xfs_is_metadir_inode(ip)) + return; xfs_trans_mod_dquot(tp, dqp, field, delta); @@ -246,11 +247,10 @@ xfs_trans_mod_dquot_byino( xfs_mount_t *mp = tp->t_mountp; if (!XFS_IS_QUOTA_ON(mp) || - xfs_is_quota_inode(&mp->m_sb, ip->i_ino)) + xfs_is_quota_inode(&mp->m_sb, ip->i_ino) || + xfs_is_metadir_inode(ip)) return; - ASSERT(!xfs_is_metadir_inode(ip) || XFS_IS_DQDETACHED(ip)); - if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta); if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot) |