Diffstat (limited to 'fs/gfs2')
37 files changed, 1247 insertions, 1160 deletions
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig index be7f87a8e11a..7bd231d16d4a 100644 --- a/fs/gfs2/Kconfig +++ b/fs/gfs2/Kconfig @@ -4,7 +4,6 @@ config GFS2_FS select BUFFER_HEAD select FS_POSIX_ACL select CRC32 - select LIBCRC32C select QUOTACTL select FS_IOMAP help diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 974aca9c8ea8..14f204cd5a82 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -37,27 +37,6 @@ #include "aops.h" -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len) -{ - struct buffer_head *head = folio_buffers(folio); - unsigned int bsize = head->b_size; - struct buffer_head *bh; - size_t to = from + len; - size_t start, end; - - for (bh = head, start = 0; bh != head || !start; - bh = bh->b_this_page, start = end) { - end = start + bsize; - if (end <= from) - continue; - if (start >= to) - break; - set_buffer_uptodate(bh); - gfs2_trans_add_data(ip->i_gl, bh); - } -} - /** * gfs2_get_block_noalloc - Fills in a buffer head with details about a block * @inode: The inode @@ -116,8 +95,7 @@ static int gfs2_write_jdata_folio(struct folio *folio, * @folio: The folio to write * @wbc: The writeback control * - * This is shared between writepage and writepages and implements the - * core of the writepage operation. If a transaction is required then + * Implements the core of write back. If a transaction is required then * the checked flag will have been set and the transaction will have * already been started before this is called. */ @@ -134,38 +112,40 @@ static int __gfs2_jdata_write_folio(struct folio *folio, inode->i_sb->s_blocksize, BIT(BH_Dirty)|BIT(BH_Uptodate)); } - gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio)); + gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio)); } return gfs2_write_jdata_folio(folio, wbc); } /** - * gfs2_jdata_writepage - Write complete page - * @page: Page to write + * gfs2_jdata_writeback - Write jdata folios to the log + * @mapping: The mapping to write * @wbc: The writeback control * * Returns: errno - * */ - -static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc) +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc) { - struct folio *folio = page_folio(page); - struct inode *inode = page->mapping->host; + struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_sbd *sdp = GFS2_SB(mapping->host); + struct folio *folio = NULL; + int error; + BUG_ON(current->journal_info); if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE)) - goto out; - if (folio_test_checked(folio) || current->journal_info) - goto out_ignore; - return __gfs2_jdata_write_folio(folio, wbc); + return 0; -out_ignore: - folio_redirty_for_writepage(wbc, folio); -out: - folio_unlock(folio); - return 0; + while ((folio = writeback_iter(mapping, wbc, folio, &error))) { + if (folio_test_checked(folio)) { + folio_redirty_for_writepage(wbc, folio); + folio_unlock(folio); + continue; + } + error = __gfs2_jdata_write_folio(folio, wbc); + } + + return error; } /** @@ -258,24 +238,16 @@ continue_unlock: ret = __gfs2_jdata_write_folio(folio, wbc); if (unlikely(ret)) { - if (ret == AOP_WRITEPAGE_ACTIVATE) { - folio_unlock(folio); - ret = 0; - } else { - - /* - * done_index is set past this page, - * so media errors will not choke - * background writeout for the entire - * file. This has consequences for - * range_cyclic semantics (ie. 
it may - * not be suitable for data integrity - * writeout). - */ - *done_index = folio_next_index(folio); - ret = 1; - break; - } + /* + * done_index is set past this page, so media errors + * will not choke background writeout for the entire + * file. This has consequences for range_cyclic + * semantics (ie. it may not be suitable for data + * integrity writeout). + */ + *done_index = folio_next_index(folio); + ret = 1; + break; } /* @@ -570,7 +542,7 @@ out: gfs2_trans_end(sdp); } -static bool jdata_dirty_folio(struct address_space *mapping, +static bool gfs2_jdata_dirty_folio(struct address_space *mapping, struct folio *folio) { if (current->journal_info) @@ -749,12 +721,12 @@ static const struct address_space_operations gfs2_aops = { }; static const struct address_space_operations gfs2_jdata_aops = { - .writepage = gfs2_jdata_writepage, .writepages = gfs2_jdata_writepages, .read_folio = gfs2_read_folio, .readahead = gfs2_readahead, - .dirty_folio = jdata_dirty_folio, + .dirty_folio = gfs2_jdata_dirty_folio, .bmap = gfs2_bmap, + .migrate_folio = buffer_migrate_folio, .invalidate_folio = gfs2_invalidate_folio, .release_folio = gfs2_release_folio, .is_partially_uptodate = block_is_partially_uptodate, diff --git a/fs/gfs2/aops.h b/fs/gfs2/aops.h index a10c4334d248..bf002522a782 100644 --- a/fs/gfs2/aops.h +++ b/fs/gfs2/aops.h @@ -9,7 +9,6 @@ #include "incore.h" void adjust_fs_space(struct inode *inode); -void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio, - size_t from, size_t len); +int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc); #endif /* __AOPS_DOT_H__ */ diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index aa1626955b2c..7703d0471139 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -988,7 +988,8 @@ static void gfs2_iomap_put_folio(struct inode *inode, loff_t pos, struct gfs2_sbd *sdp = GFS2_SB(inode); if (!gfs2_is_stuffed(ip)) - gfs2_trans_add_databufs(ip, folio, offset_in_folio(folio, pos), + gfs2_trans_add_databufs(ip->i_gl, folio, + offset_in_folio(folio, pos), copied); folio_unlock(folio); @@ -1296,11 +1297,14 @@ int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock, * uses iomap write to perform its actions, which begin their own transactions * (iomap_begin, get_folio, etc.) 
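The gfs2_jdata_writeback() rewrite above replaces the old writepage entry point with a writeback_iter() loop. A minimal sketch of that iterator pattern, assuming a hypothetical per-folio writer write_one_folio():

        static int example_writeback(struct address_space *mapping,
                                     struct writeback_control *wbc)
        {
                struct folio *folio = NULL;
                int error = 0;

                /*
                 * writeback_iter() hands back the next dirty folio, locked.
                 * Passing the previous folio back in advances the walk, the
                 * error of the previous iteration is consumed through &error,
                 * and a NULL return terminates the loop.
                 */
                while ((folio = writeback_iter(mapping, wbc, folio, &error)))
                        error = write_one_folio(folio, wbc);    /* hypothetical */
                return error;
        }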
*/ -static int gfs2_block_zero_range(struct inode *inode, loff_t from, - unsigned int length) +static int gfs2_block_zero_range(struct inode *inode, loff_t from, loff_t length) { BUG_ON(current->journal_info); - return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); + if (from >= inode->i_size) + return 0; + length = min(length, inode->i_size - from); + return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops, + NULL); } #define GFS2_JTRUNC_REVOKES 8192 @@ -1827,7 +1831,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) gfs2_assert_withdraw(sdp, bh); if (gfs2_assert_withdraw(sdp, prev_bnr != bh->b_blocknr)) { - fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u," + fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, " "s_h:%u, mp_h:%u\n", (unsigned long long)ip->i_no_addr, prev_bnr, ip->i_height, strip_h, mp_h); diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 2e215e8c3c88..95050e719233 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c @@ -21,7 +21,9 @@ /** * gfs2_drevalidate - Check directory lookup consistency - * @dentry: the mapping to check + * @dir: expected parent directory inode + * @name: expected name + * @dentry: dentry to check * @flags: lookup flags * * Check to make sure the lookup necessary to arrive at this inode from its @@ -30,50 +32,43 @@ * Returns: 1 if the dentry is ok, 0 if it isn't */ -static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags) +static int gfs2_drevalidate(struct inode *dir, const struct qstr *name, + struct dentry *dentry, unsigned int flags) { - struct dentry *parent; - struct gfs2_sbd *sdp; - struct gfs2_inode *dip; + struct gfs2_sbd *sdp = GFS2_SB(dir); + struct gfs2_inode *dip = GFS2_I(dir); struct inode *inode; struct gfs2_holder d_gh; struct gfs2_inode *ip = NULL; - int error, valid = 0; + int error, valid; int had_lock = 0; if (flags & LOOKUP_RCU) return -ECHILD; - parent = dget_parent(dentry); - sdp = GFS2_SB(d_inode(parent)); - dip = GFS2_I(d_inode(parent)); inode = d_inode(dentry); if (inode) { if (is_bad_inode(inode)) - goto out; + return 0; ip = GFS2_I(inode); } - if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) { - valid = 1; - goto out; - } + if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL) + return 1; had_lock = (gfs2_glock_is_locked_by_me(dip->i_gl) != NULL); if (!had_lock) { error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); if (error) - goto out; + return 0; } - error = gfs2_dir_check(d_inode(parent), &dentry->d_name, ip); + error = gfs2_dir_check(dir, name, ip); valid = inode ?
!error : (error == -ENOENT); if (!had_lock) gfs2_glock_dq_uninit(&d_gh); -out: - dput(parent); return valid; } diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 560e4624c09f..dbf1aede744c 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -562,15 +562,18 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, int ret = 0; ret = gfs2_dirent_offset(GFS2_SB(inode), buf); - if (ret < 0) - goto consist_inode; - + if (ret < 0) { + gfs2_consist_inode(GFS2_I(inode)); + return ERR_PTR(-EIO); + } offset = ret; prev = NULL; dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); - if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) - goto consist_inode; + if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) { + gfs2_consist_inode(GFS2_I(inode)); + return ERR_PTR(-EIO); + } do { ret = scan(dent, name, opaque); if (ret) @@ -582,8 +585,10 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, dent = buf + offset; size = be16_to_cpu(dent->de_rec_len); if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, - len, 0)) - goto consist_inode; + len, 0)) { + gfs2_consist_inode(GFS2_I(inode)); + return ERR_PTR(-EIO); + } } while(1); switch(ret) { @@ -597,10 +602,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf, BUG_ON(ret > 0); return ERR_PTR(ret); } - -consist_inode: - gfs2_consist_inode(GFS2_I(inode)); - return ERR_PTR(-EIO); } static int dirent_check_reclen(struct gfs2_inode *dip, @@ -609,14 +610,16 @@ static int dirent_check_reclen(struct gfs2_inode *dip, const void *ptr = d; u16 rec_len = be16_to_cpu(d->de_rec_len); - if (unlikely(rec_len < sizeof(struct gfs2_dirent))) - goto broken; + if (unlikely(rec_len < sizeof(struct gfs2_dirent))) { + gfs2_consist_inode(dip); + return -EIO; + } ptr += rec_len; if (ptr < end_p) return rec_len; if (ptr == end_p) return -ENOENT; -broken: + gfs2_consist_inode(dip); return -EIO; } diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index d418d8b5367f..3334c394ce9c 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -190,6 +190,5 @@ const struct export_operations gfs2_export_ops = { .fh_to_parent = gfs2_fh_to_parent, .get_name = gfs2_get_name, .get_parent = gfs2_get_parent, - .flags = EXPORT_OP_ASYNC_LOCK, }; diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 4c42ada60ae7..fd1147aa3891 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -251,6 +251,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask) error = filemap_fdatawait(inode->i_mapping); if (error) goto out; + truncate_inode_pages(inode->i_mapping, 0); if (new_flags & GFS2_DIF_JDATA) gfs2_ordered_del_inode(ip); } @@ -376,23 +377,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size) } /** - * gfs2_allocate_page_backing - Allocate blocks for a write fault - * @page: The (locked) page to allocate backing for + * gfs2_allocate_folio_backing - Allocate blocks for a write fault + * @folio: The (locked) folio to allocate backing for * @length: Size of the allocation * - * We try to allocate all the blocks required for the page in one go. This + * We try to allocate all the blocks required for the folio in one go. This * might fail for various reasons, so we keep trying until all the blocks to - * back this page are allocated. If some of the blocks are already allocated, + * back this folio are allocated. If some of the blocks are already allocated, * that is ok too. 
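The fs/gfs2/file.c hunks below convert gfs2_page_mkwrite() and its helper from struct page to struct folio. Since a folio can span more than one page, the fixed PAGE_SIZE computations become folio_size() calls; the rough correspondence used throughout the conversion:

        /*
         * page_offset(page)          ->  folio_pos(folio)
         * PAGE_SIZE                  ->  folio_size(folio)
         * lock_page()/unlock_page()  ->  folio_lock()/folio_unlock()
         * PageUptodate(page)         ->  folio_test_uptodate(folio)
         * set_page_dirty(page)       ->  folio_mark_dirty(folio)
         * wait_for_stable_page(page) ->  folio_wait_stable(folio)
         */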
*/ -static int gfs2_allocate_page_backing(struct page *page, unsigned int length) +static int gfs2_allocate_folio_backing(struct folio *folio, size_t length) { - u64 pos = page_offset(page); + u64 pos = folio_pos(folio); do { struct iomap iomap = { }; - if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap)) + if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap)) return -EIO; if (length < iomap.length) @@ -414,16 +415,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length) static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_alloc_parms ap = {}; - u64 offset = page_offset(page); + u64 pos = folio_pos(folio); unsigned int data_blocks, ind_blocks, rblocks; vm_fault_t ret = VM_FAULT_LOCKED; struct gfs2_holder gh; - unsigned int length; + size_t length; loff_t size; int err; @@ -436,23 +437,23 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) goto out_uninit; } - /* Check page index against inode size */ + /* Check folio index against inode size */ size = i_size_read(inode); - if (offset >= size) { + if (pos >= size) { ret = VM_FAULT_SIGBUS; goto out_unlock; } - /* Update file times before taking page lock */ + /* Update file times before taking folio lock */ file_update_time(vmf->vma->vm_file); - /* page is wholly or partially inside EOF */ - if (size - offset < PAGE_SIZE) - length = size - offset; + /* folio is wholly or partially inside EOF */ + if (size - pos < folio_size(folio)) + length = size - pos; else - length = PAGE_SIZE; + length = folio_size(folio); - gfs2_size_hint(vmf->vma->vm_file, offset, length); + gfs2_size_hint(vmf->vma->vm_file, pos, length); set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); @@ -463,11 +464,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) */ if (!gfs2_is_stuffed(ip) && - !gfs2_write_alloc_required(ip, offset, length)) { - lock_page(page); - if (!PageUptodate(page) || page->mapping != inode->i_mapping) { + !gfs2_write_alloc_required(ip, pos, length)) { + folio_lock(folio); + if (!folio_test_uptodate(folio) || + folio->mapping != inode->i_mapping) { ret = VM_FAULT_NOPAGE; - unlock_page(page); + folio_unlock(folio); } goto out_unlock; } @@ -504,7 +506,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) goto out_trans_fail; } - /* Unstuff, if required, and allocate backing blocks for page */ + /* Unstuff, if required, and allocate backing blocks for folio */ if (gfs2_is_stuffed(ip)) { err = gfs2_unstuff_dinode(ip); if (err) { @@ -513,22 +515,22 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) } } - lock_page(page); + folio_lock(folio); /* If truncated, we must retry the operation, we may have raced * with the glock demotion code. 
*/ - if (!PageUptodate(page) || page->mapping != inode->i_mapping) { + if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) { ret = VM_FAULT_NOPAGE; goto out_page_locked; } - err = gfs2_allocate_page_backing(page, length); + err = gfs2_allocate_folio_backing(folio, length); if (err) ret = vmf_fs_error(err); out_page_locked: if (ret != VM_FAULT_LOCKED) - unlock_page(page); + folio_unlock(folio); out_trans_end: gfs2_trans_end(sdp); out_trans_fail: @@ -540,8 +542,8 @@ out_unlock: out_uninit: gfs2_holder_uninit(&gh); if (ret == VM_FAULT_LOCKED) { - set_page_dirty(page); - wait_for_stable_page(page); + folio_mark_dirty(folio); + folio_wait_stable(folio); } sb_end_pagefault(inode->i_sb); return ret; @@ -818,7 +820,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to, /* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate - * that the inode glock may be dropped, fault in the pages manually, + * that the inode glock should be dropped, fault in the pages manually, * and retry. * * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger @@ -883,7 +885,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from, /* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate - * that the inode glock may be dropped, fault in the pages manually, + * that the inode glock should be dropped, fault in the pages manually, * and retry. * * For writes, iomap_dio_rw only triggers manual page faults, so we @@ -955,7 +957,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) /* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate - * that the inode glock may be dropped, fault in the pages manually, + * that the inode glock should be dropped, fault in the pages manually, * and retry. */ @@ -1022,7 +1024,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, /* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate - * that the inode glock may be dropped, fault in the pages manually, + * that the inode glock should be dropped, fault in the pages manually, * and retry. 
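The buffered-write hunk below shows how this works: page faults are disabled around the iomap call, and on -EFAULT the glock is dropped and the user buffer is faulted in by hand before retrying. A condensed sketch, with the glock calls and window-size logic elided (window_size is a stand-in name here):

        pagefault_disable();
        ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL);
        pagefault_enable();
        if (ret == -EFAULT) {
                /*
                 * Drop the inode glock, then fault the buffer in manually.
                 * fault_in_iov_iter_readable() returns the number of bytes
                 * it could NOT fault in, so any progress makes a retry
                 * worthwhile.
                 */
                if (fault_in_iov_iter_readable(from, window_size) != window_size)
                        goto retry;
        }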
*/ @@ -1056,7 +1058,7 @@ retry: } pagefault_disable(); - ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); + ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops, NULL); pagefault_enable(); if (ret > 0) written += ret; @@ -1585,6 +1587,7 @@ const struct file_operations gfs2_file_fops = { .splice_write = gfs2_file_splice_write, .setlease = simple_nosetlease, .fallocate = gfs2_fallocate, + .fop_flags = FOP_ASYNC_LOCK, }; const struct file_operations gfs2_dir_fops = { @@ -1597,6 +1600,7 @@ const struct file_operations gfs2_dir_fops = { .lock = gfs2_lock, .flock = gfs2_flock, .llseek = default_llseek, + .fop_flags = FOP_ASYNC_LOCK, }; #endif /* CONFIG_GFS2_FS_LOCKING_DLM */ diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 34540f9d011c..ba25b884169e 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -34,8 +34,8 @@ #include <linux/lockref.h> #include <linux/rhashtable.h> #include <linux/pid_namespace.h> -#include <linux/fdtable.h> #include <linux/file.h> +#include <linux/random.h> #include "gfs2.h" #include "incore.h" @@ -61,12 +61,10 @@ struct gfs2_glock_iter { typedef void (*glock_examiner) (struct gfs2_glock * gl); static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target); -static void __gfs2_glock_dq(struct gfs2_holder *gh); -static void handle_callback(struct gfs2_glock *gl, unsigned int state, - unsigned long delay, bool remote); +static void request_demote(struct gfs2_glock *gl, unsigned int state, + unsigned long delay, bool remote); static struct dentry *gfs2_root; -static struct workqueue_struct *glock_workqueue; static LIST_HEAD(lru_list); static atomic_t lru_count = ATOMIC_INIT(0); static DEFINE_SPINLOCK(lru_lock); @@ -166,19 +164,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) return true; } -void gfs2_glock_free(struct gfs2_glock *gl) +static void __gfs2_glock_free(struct gfs2_glock *gl) { - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - - gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0); rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); smp_mb(); wake_up_glock(gl); call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); +} + +void gfs2_glock_free(struct gfs2_glock *gl) { + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + __gfs2_glock_free(gl); if (atomic_dec_and_test(&sdp->sd_glock_disposal)) wake_up(&sdp->sd_kill_wait); } +void gfs2_glock_free_later(struct gfs2_glock *gl) { + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + spin_lock(&lru_lock); + list_add(&gl->gl_lru, &sdp->sd_dead_glocks); + spin_unlock(&lru_lock); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_kill_wait); +} + +static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp) +{ + struct list_head *list = &sdp->sd_dead_glocks; + + while(!list_empty(list)) { + struct gfs2_glock *gl; + + gl = list_first_entry(list, struct gfs2_glock, gl_lru); + list_del_init(&gl->gl_lru); + __gfs2_glock_free(gl); + } +} + /** * gfs2_glock_hold() - increment reference count on glock * @gl: The glock to hold @@ -192,34 +216,9 @@ struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl) return gl; } -/** - * demote_ok - Check to see if it's ok to unlock a glock - * @gl: the glock - * - * Returns: 1 if it's ok - */ - -static int demote_ok(const struct gfs2_glock *gl) +static void gfs2_glock_add_to_lru(struct gfs2_glock *gl) { - const struct gfs2_glock_operations *glops = gl->gl_ops; - - if (gl->gl_state == LM_ST_UNLOCKED) - return 0; - if (!list_empty(&gl->gl_holders)) - return 0; - if (glops->go_demote_ok) - return 
glops->go_demote_ok(gl); - return 1; -} - - -void gfs2_glock_add_to_lru(struct gfs2_glock *gl) -{ - if (!(gl->gl_ops->go_flags & GLOF_LRU)) - return; - spin_lock(&lru_lock); - list_move_tail(&gl->gl_lru, &lru_list); if (!test_bit(GLF_LRU, &gl->gl_flags)) { @@ -232,9 +231,6 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl) static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) { - if (!(gl->gl_ops->go_flags & GLOF_LRU)) - return; - spin_lock(&lru_lock); if (test_bit(GLF_LRU, &gl->gl_flags)) { list_del_init(&gl->gl_lru); @@ -248,8 +244,10 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) * Enqueue the glock on the work queue. Passes one glock reference on to the * work queue. */ -static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { - if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) { +static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) { /* * We are holding the lockref spinlock, and the work was still * queued above. The queued work (glock_work_func) takes that @@ -261,12 +259,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) } } -static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) { - spin_lock(&gl->gl_lockref.lock); - __gfs2_glock_queue_work(gl, delay); - spin_unlock(&gl->gl_lockref.lock); -} - static void __gfs2_glock_put(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; @@ -285,12 +277,18 @@ static void __gfs2_glock_put(struct gfs2_glock *gl) sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); } -/* - * Cause the glock to be put in work queue context. - */ -void gfs2_glock_queue_put(struct gfs2_glock *gl) +static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl) { - gfs2_glock_queue_work(gl, 0); + if (lockref_put_or_lock(&gl->gl_lockref)) + return true; + GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1); + if (gl->gl_state != LM_ST_UNLOCKED) { + gl->gl_lockref.count--; + gfs2_glock_add_to_lru(gl); + spin_unlock(&gl->gl_lockref.lock); + return true; + } + return false; } /** @@ -301,12 +299,28 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl) void gfs2_glock_put(struct gfs2_glock *gl) { - if (lockref_put_or_lock(&gl->gl_lockref)) + if (__gfs2_glock_put_or_lock(gl)) return; __gfs2_glock_put(gl); } +/* + * gfs2_glock_put_async - Decrement reference count without sleeping + * @gl: The glock to put + * + * Decrement the reference count on glock immediately unless it is the last + * reference. Defer putting the last reference to work queue context. 
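Both gfs2_glock_put() and the new gfs2_glock_put_async() lean on the lockref contract: lockref_put_or_lock() decrements the count and returns true as long as the result stays above zero; otherwise it returns false with the spinlock held and the count still at 1, leaving the last reference to the caller. In short:

        if (lockref_put_or_lock(&gl->gl_lockref))
                return;         /* count was > 1 and has been decremented */
        /*
         * Here count == 1 and gl->gl_lockref.lock is held: free the glock,
         * park it on the LRU, or (in the async variant) hand the final put
         * off to the glock work queue.
         */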
+ */ +void gfs2_glock_put_async(struct gfs2_glock *gl) +{ + if (__gfs2_glock_put_or_lock(gl)) + return; + + gfs2_glock_queue_work(gl, 0); + spin_unlock(&gl->gl_lockref.lock); +} + /** * may_grant - check if it's ok to grant a new lock * @gl: The glock @@ -541,18 +555,6 @@ static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl) static void state_change(struct gfs2_glock *gl, unsigned int new_state) { - int held1, held2; - - held1 = (gl->gl_state != LM_ST_UNLOCKED); - held2 = (new_state != LM_ST_UNLOCKED); - - if (held1 != held2) { - GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref)); - if (held2) - gl->gl_lockref.count++; - else - gl->gl_lockref.count--; - } if (new_state != gl->gl_target) /* shorten our minimum hold time */ gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR, @@ -561,11 +563,11 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state) gl->gl_tchange = jiffies; } -static void gfs2_set_demote(struct gfs2_glock *gl) +static void gfs2_set_demote(int nr, struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - set_bit(GLF_DEMOTE, &gl->gl_flags); + set_bit(nr, &gl->gl_flags); smp_mb(); wake_up(&sdp->sd_async_glock_wait); } @@ -591,7 +593,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) struct gfs2_holder *gh; unsigned state = ret & LM_OUT_ST_MASK; - spin_lock(&gl->gl_lockref.lock); trace_gfs2_glock_state_change(gl, state); state_change(gl, state); gh = find_first_waiter(gl); @@ -606,14 +607,19 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) if (gh && (ret & LM_OUT_CANCELED)) gfs2_holder_wake(gh); if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) { - /* move to back of queue and try next entry */ if (ret & LM_OUT_CANCELED) { - list_move_tail(&gh->gh_list, &gl->gl_holders); + list_del_init(&gh->gh_list); + trace_gfs2_glock_queue(gh, 0); + gl->gl_target = gl->gl_state; gh = find_first_waiter(gl); - gl->gl_target = gh->gh_state; - if (do_promote(gl)) - goto out; - goto retry; + if (gh) { + gl->gl_target = gh->gh_state; + if (do_promote(gl)) + goto out; + do_xmote(gl, gh, gl->gl_target); + return; + } + goto out; } /* Some error or failed "try lock" - report it */ if ((ret & LM_OUT_ERROR) || @@ -626,7 +632,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) switch(state) { /* Unlocked due to conversion deadlock, try again */ case LM_ST_UNLOCKED: -retry: do_xmote(gl, gh, gl->gl_target); break; /* Conversion fails, unlock and try again */ @@ -639,7 +644,6 @@ retry: gl->gl_target, state); GLOCK_BUG_ON(gl, 1); } - spin_unlock(&gl->gl_lockref.lock); return; } @@ -661,8 +665,8 @@ retry: do_promote(gl); } out: - clear_bit(GLF_LOCK, &gl->gl_flags); - spin_unlock(&gl->gl_lockref.lock); + if (!test_bit(GLF_CANCELING, &gl->gl_flags)) + clear_bit(GLF_LOCK, &gl->gl_flags); } static bool is_system_glock(struct gfs2_glock *gl) @@ -690,6 +694,7 @@ __acquires(&gl->gl_lockref.lock) { const struct gfs2_glock_operations *glops = gl->gl_ops; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct lm_lockstruct *ls = &sdp->sd_lockstruct; unsigned int lck_flags = (unsigned int)(gh ? 
gh->gh_flags : 0); int ret; @@ -718,6 +723,9 @@ __acquires(&gl->gl_lockref.lock) (gl->gl_state == LM_ST_EXCLUSIVE) || (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) clear_bit(GLF_BLOCKING, &gl->gl_flags); + if (!glops->go_inval && !glops->go_sync) + goto skip_inval; + spin_unlock(&gl->gl_lockref.lock); if (glops->go_sync) { ret = glops->go_sync(gl); @@ -730,6 +738,7 @@ __acquires(&gl->gl_lockref.lock) fs_err(sdp, "Error %d syncing glock \n", ret); gfs2_dump_glock(NULL, gl, true); } + spin_lock(&gl->gl_lockref.lock); goto skip_inval; } } @@ -750,9 +759,10 @@ __acquires(&gl->gl_lockref.lock) glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); } + spin_lock(&gl->gl_lockref.lock); skip_inval: - gfs2_glock_hold(gl); + gl->gl_lockref.count++; /* * Check for an error encountered since we called go_sync and go_inval. * If so, we can't withdraw from the glock code because the withdraw @@ -780,7 +790,7 @@ skip_inval: (target != LM_ST_UNLOCKED || test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { if (!is_system_glock(gl)) { - handle_callback(gl, LM_ST_UNLOCKED, 0, false); /* sets demote */ + request_demote(gl, LM_ST_UNLOCKED, 0, false); /* * Ordinarily, we would call dlm and its callback would call * finish_xmote, which would call state_change() to the new state. @@ -795,30 +805,38 @@ skip_inval: clear_bit(GLF_LOCK, &gl->gl_flags); clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); - goto out; + return; } else { clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); } } - if (sdp->sd_lockstruct.ls_ops->lm_lock) { - /* lock_dlm */ - ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); + if (ls->ls_ops->lm_lock) { + set_bit(GLF_PENDING_REPLY, &gl->gl_flags); + spin_unlock(&gl->gl_lockref.lock); + ret = ls->ls_ops->lm_lock(gl, target, lck_flags); + spin_lock(&gl->gl_lockref.lock); + if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && target == LM_ST_UNLOCKED && - test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { - finish_xmote(gl, target); - gfs2_glock_queue_work(gl, 0); + test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { + /* + * The lockspace has been released and the lock has + * been unlocked implicitly. + */ } else if (ret) { fs_err(sdp, "lm_lock ret %d\n", ret); - GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp)); + target = gl->gl_state | LM_OUT_ERROR; + } else { + /* The operation will be completed asynchronously. */ + return; } - } else { /* lock_nolock */ - finish_xmote(gl, target); - gfs2_glock_queue_work(gl, 0); + clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); } -out: - spin_lock(&gl->gl_lockref.lock); + + /* Complete the operation now. */ + finish_xmote(gl, target); + gfs2_glock_queue_work(gl, 0); } /** @@ -832,11 +850,13 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock) __releases(&gl->gl_lockref.lock) __acquires(&gl->gl_lockref.lock) { - struct gfs2_holder *gh = NULL; + struct gfs2_holder *gh; - if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) + if (test_bit(GLF_LOCK, &gl->gl_flags)) return; + set_bit(GLF_LOCK, &gl->gl_flags); + /* While a demote is in progress, the GLF_LOCK flag must be set. 
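One subtle change above: at skip_inval:, do_xmote() now takes its extra reference with a bare gl->gl_lockref.count++ rather than gfs2_glock_hold(). The lockref spinlock is already held at that point, and lockref_get()'s slow path takes that same lock, so the count is bumped directly:

        /*
         * gl->gl_lockref.lock is held here; gfs2_glock_hold() would go
         * through lockref_get(), whose slow path spins on the same lock.
         * With the lock already held, bump the count directly.
         */
        gl->gl_lockref.count++;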
*/ GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); if (test_bit(GLF_DEMOTE, &gl->gl_flags) && @@ -848,30 +868,33 @@ __acquires(&gl->gl_lockref.lock) set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; + do_xmote(gl, NULL, gl->gl_target); + return; } else { if (test_bit(GLF_DEMOTE, &gl->gl_flags)) gfs2_demote_wake(gl); if (do_promote(gl)) goto out_unlock; gh = find_first_waiter(gl); + if (!gh) + goto out_unlock; gl->gl_target = gh->gh_state; if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) do_error(gl, 0); /* Fail queued try locks */ + do_xmote(gl, gh, gl->gl_target); + return; } - do_xmote(gl, gh, gl->gl_target); - return; out_sched: clear_bit(GLF_LOCK, &gl->gl_flags); smp_mb__after_atomic(); gl->gl_lockref.count++; - __gfs2_glock_queue_work(gl, 0); + gfs2_glock_queue_work(gl, 0); return; out_unlock: clear_bit(GLF_LOCK, &gl->gl_flags); smp_mb__after_atomic(); - return; } /** @@ -887,12 +910,8 @@ void glock_set_object(struct gfs2_glock *gl, void *object) prev_object = gl->gl_object; gl->gl_object = object; spin_unlock(&gl->gl_lockref.lock); - if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) { - pr_warn("glock=%u/%llx\n", - gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number); + if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL)) gfs2_dump_glock(NULL, gl, true); - } } /** @@ -908,12 +927,8 @@ void glock_clear_object(struct gfs2_glock *gl, void *object) prev_object = gl->gl_object; gl->gl_object = NULL; spin_unlock(&gl->gl_lockref.lock); - if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) { - pr_warn("glock=%u/%llx\n", - gl->gl_name.ln_type, - (unsigned long long)gl->gl_name.ln_number); + if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object)) gfs2_dump_glock(NULL, gl, true); - } } void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation) @@ -948,48 +963,56 @@ static void gfs2_glock_poke(struct gfs2_glock *gl) gfs2_holder_uninit(&gh); } -static bool gfs2_try_evict(struct gfs2_glock *gl) +static struct gfs2_inode *gfs2_grab_existing_inode(struct gfs2_glock *gl) +{ + struct gfs2_inode *ip; + + spin_lock(&gl->gl_lockref.lock); + ip = gl->gl_object; + if (ip && !igrab(&ip->i_inode)) + ip = NULL; + spin_unlock(&gl->gl_lockref.lock); + if (ip) { + wait_on_inode(&ip->i_inode); + if (is_bad_inode(&ip->i_inode)) { + iput(&ip->i_inode); + ip = NULL; + } + } + return ip; +} + +static void gfs2_try_evict(struct gfs2_glock *gl) { struct gfs2_inode *ip; - bool evicted = false; /* * If there is contention on the iopen glock and we have an inode, try - * to grab and release the inode so that it can be evicted. This will - * allow the remote node to go ahead and delete the inode without us - * having to do it, which will avoid rgrp glock thrashing. + * to grab and release the inode so that it can be evicted. The + * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode + * should not be deleted locally. This will allow the remote node to + * go ahead and delete the inode without us having to do it, which will + * avoid rgrp glock thrashing. * * The remote node is likely still holding the corresponding inode * glock, so it will run before we get to verify that the delete has - * happened below. + * happened below. (Verification is triggered by the call to + * gfs2_queue_verify_delete() in gfs2_evict_inode().) 
*/ - spin_lock(&gl->gl_lockref.lock); - ip = gl->gl_object; - if (ip && !igrab(&ip->i_inode)) - ip = NULL; - spin_unlock(&gl->gl_lockref.lock); + ip = gfs2_grab_existing_inode(gl); if (ip) { - gl->gl_no_formal_ino = ip->i_no_formal_ino; - set_bit(GIF_DEFERRED_DELETE, &ip->i_flags); + set_bit(GLF_DEFER_DELETE, &gl->gl_flags); d_prune_aliases(&ip->i_inode); iput(&ip->i_inode); + clear_bit(GLF_DEFER_DELETE, &gl->gl_flags); /* If the inode was evicted, gl->gl_object will now be NULL. */ - spin_lock(&gl->gl_lockref.lock); - ip = gl->gl_object; - if (ip) { - clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags); - if (!igrab(&ip->i_inode)) - ip = NULL; - } - spin_unlock(&gl->gl_lockref.lock); + ip = gfs2_grab_existing_inode(gl); if (ip) { gfs2_glock_poke(ip->i_gl); iput(&ip->i_inode); } - evicted = !ip; } - return evicted; } bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) @@ -998,18 +1021,18 @@ bool gfs2_queue_try_to_evict(struct gfs2_glock *gl) if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) return false; - return queue_delayed_work(sdp->sd_delete_wq, - &gl->gl_delete, 0); + return !mod_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, 0); } -static bool gfs2_queue_verify_evict(struct gfs2_glock *gl) +bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + unsigned long delay; - if (test_and_set_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) + if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags)) return false; - return queue_delayed_work(sdp->sd_delete_wq, - &gl->gl_delete, 5 * HZ); + delay = later ? HZ + get_random_long() % (HZ * 9) : 0; + return queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay); } static void delete_work_func(struct work_struct *work) @@ -1017,43 +1040,21 @@ static void delete_work_func(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete); struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct inode *inode; - u64 no_addr = gl->gl_name.ln_number; + bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags); - if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) { - /* - * If we can evict the inode, give the remote node trying to - * delete the inode some time before verifying that the delete - * has happened. Otherwise, if we cause contention on the inode glock - * immediately, the remote node will think that we still have - * the inode in use, and so it will give up waiting. - * - * If we can't evict the inode, signal to the remote node that - * the inode is still in use. We'll later try to delete the - * inode locally in gfs2_evict_inode. - * - * FIXME: We only need to verify that the remote node has - * deleted the inode because nodes before this remote delete - * rework won't cooperate. At a later time, when we no longer - * care about compatibility with such nodes, we can skip this - * step entirely. 
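gfs2_queue_verify_delete() above replaces the fixed 5-second verification delay with a randomized one, so that a burst of iopen glocks (or several nodes) do not all re-check at the same moment. The delay expression picks a point in roughly [1s, 10s):

        /* later == true: HZ + [0, 9 * HZ) => uniform within [HZ, 10 * HZ) */
        delay = later ? HZ + get_random_long() % (HZ * 9) : 0;
        queue_delayed_work(sdp->sd_delete_wq, &gl->gl_delete, delay);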
- */ - if (gfs2_try_evict(gl)) { - if (test_bit(SDF_KILL, &sdp->sd_flags)) - goto out; - if (gfs2_queue_verify_evict(gl)) - return; - } - goto out; - } + if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) + gfs2_try_evict(gl); + + if (verify_delete) { + u64 no_addr = gl->gl_name.ln_number; + struct inode *inode; - if (test_and_clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags)) { inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino, GFS2_BLKST_UNLINKED); if (IS_ERR(inode)) { if (PTR_ERR(inode) == -EAGAIN && !test_bit(SDF_KILL, &sdp->sd_flags) && - gfs2_queue_verify_evict(gl)) + gfs2_queue_verify_delete(gl, true)) return; } else { d_prune_aliases(inode); @@ -1061,7 +1062,6 @@ static void delete_work_func(struct work_struct *work) } } -out: gfs2_glock_put(gl); } @@ -1071,43 +1071,44 @@ static void glock_work_func(struct work_struct *work) struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work); unsigned int drop_refs = 1; - if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) { + spin_lock(&gl->gl_lockref.lock); + if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) { + clear_bit(GLF_HAVE_REPLY, &gl->gl_flags); finish_xmote(gl, gl->gl_reply); drop_refs++; } - spin_lock(&gl->gl_lockref.lock); if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && gl->gl_state != LM_ST_UNLOCKED && gl->gl_demote_state != LM_ST_EXCLUSIVE) { - unsigned long holdtime, now = jiffies; + if (gl->gl_name.ln_type == LM_TYPE_INODE) { + unsigned long holdtime, now = jiffies; - holdtime = gl->gl_tchange + gl->gl_hold_time; - if (time_before(now, holdtime)) - delay = holdtime - now; + holdtime = gl->gl_tchange + gl->gl_hold_time; + if (time_before(now, holdtime)) + delay = holdtime - now; + } if (!delay) { clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); - gfs2_set_demote(gl); + gfs2_set_demote(GLF_DEMOTE, gl); } } run_queue(gl, 0); if (delay) { /* Keep one glock reference for the work we requeue. */ drop_refs--; - if (gl->gl_name.ln_type != LM_TYPE_INODE) - delay = 0; - __gfs2_glock_queue_work(gl, delay); + gfs2_glock_queue_work(gl, delay); } - /* - * Drop the remaining glock references manually here. (Mind that - * __gfs2_glock_queue_work depends on the lockref spinlock begin held - * here as well.) - */ + /* Drop the remaining glock references manually. */ + GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs); gl->gl_lockref.count -= drop_refs; if (!gl->gl_lockref.count) { - __gfs2_glock_put(gl); - return; + if (gl->gl_state == LM_ST_UNLOCKED) { + __gfs2_glock_put(gl); + return; + } + gfs2_glock_add_to_lru(gl); } spin_unlock(&gl->gl_lockref.lock); } @@ -1143,6 +1144,8 @@ again: out: rcu_read_unlock(); finish_wait(wq, &wait.wait); + if (gl) + gfs2_glock_remove_from_lru(gl); return gl; } @@ -1163,19 +1166,15 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, const struct gfs2_glock_operations *glops, int create, struct gfs2_glock **glp) { - struct super_block *s = sdp->sd_vfs; struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type, .ln_sbd = sdp }; struct gfs2_glock *gl, *tmp; struct address_space *mapping; - int ret = 0; gl = find_insert_glock(&name, NULL); - if (gl) { - *glp = gl; - return 0; - } + if (gl) + goto found; if (!create) return -ENOENT; @@ -1203,10 +1202,12 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, atomic_inc(&sdp->sd_glock_disposal); gl->gl_node.next = NULL; - gl->gl_flags = glops->go_instantiate ? 
BIT(GLF_INSTANTIATE_NEEDED) : 0; + gl->gl_flags = BIT(GLF_INITIAL); + if (glops->go_instantiate) + gl->gl_flags |= BIT(GLF_INSTANTIATE_NEEDED); gl->gl_name = name; + lockref_init(&gl->gl_lockref); lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass); - gl->gl_lockref.count = 1; gl->gl_state = LM_ST_UNLOCKED; gl->gl_target = LM_ST_UNLOCKED; gl->gl_demote_state = LM_ST_EXCLUSIVE; @@ -1227,7 +1228,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, mapping = gfs2_glock2aspace(gl); if (mapping) { mapping->a_ops = &gfs2_meta_aops; - mapping->host = s->s_bdev->bd_inode; + mapping->host = sdp->sd_inode; mapping->flags = 0; mapping_set_gfp_mask(mapping, GFP_NOFS); mapping->i_private_data = NULL; @@ -1235,23 +1236,19 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, } tmp = find_insert_glock(&name, gl); - if (!tmp) { - *glp = gl; - goto out; - } - if (IS_ERR(tmp)) { - ret = PTR_ERR(tmp); - goto out_free; - } - *glp = tmp; + if (tmp) { + gfs2_glock_dealloc(&gl->gl_rcu); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_kill_wait); -out_free: - gfs2_glock_dealloc(&gl->gl_rcu); - if (atomic_dec_and_test(&sdp->sd_glock_disposal)) - wake_up(&sdp->sd_kill_wait); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + gl = tmp; + } -out: - return ret; +found: + *glp = gl; + return 0; } /** @@ -1421,7 +1418,7 @@ out: } /** - * handle_callback - process a demote request + * request_demote - process a demote request * @gl: the glock * @state: the state the caller wants us to change to * @delay: zero to demote immediately; otherwise pending demote @@ -1431,13 +1428,10 @@ out: * practise: LM_ST_SHARED and LM_ST_UNLOCKED */ -static void handle_callback(struct gfs2_glock *gl, unsigned int state, - unsigned long delay, bool remote) +static void request_demote(struct gfs2_glock *gl, unsigned int state, + unsigned long delay, bool remote) { - if (delay) - set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags); - else - gfs2_set_demote(gl); + gfs2_set_demote(delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE, gl); if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { gl->gl_demote_state = state; gl->gl_demote_time = jiffies; @@ -1473,9 +1467,7 @@ static inline bool pid_is_meaningful(const struct gfs2_holder *gh) { if (!(gh->gh_flags & GL_NOPID)) return true; - if (gh->gh_state == LM_ST_UNLOCKED) - return true; - return false; + return !test_bit(HIF_HOLDER, &gh->gh_iflags); } /** @@ -1494,7 +1486,6 @@ __acquires(&gl->gl_lockref.lock) { struct gfs2_glock *gl = gh->gh_gl; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct list_head *insert_pt = NULL; struct gfs2_holder *gh2; int try_futile = 0; @@ -1530,21 +1521,11 @@ fail: gfs2_holder_wake(gh); return; } - if (test_bit(HIF_HOLDER, &gh2->gh_iflags)) - continue; } trace_gfs2_glock_queue(gh, 1); gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT); - if (likely(insert_pt == NULL)) { - list_add_tail(&gh->gh_list, &gl->gl_holders); - return; - } - list_add_tail(&gh->gh_list, insert_pt); - spin_unlock(&gl->gl_lockref.lock); - if (sdp->sd_lockstruct.ls_ops->lm_cancel) - sdp->sd_lockstruct.ls_ops->lm_cancel(gl); - spin_lock(&gl->gl_lockref.lock); + list_add_tail(&gh->gh_list, &gl->gl_holders); return; trap_recursive: @@ -1596,17 +1577,14 @@ unlock: return error; } - if (test_bit(GLF_LRU, &gl->gl_flags)) - gfs2_glock_remove_from_lru(gl); - gh->gh_error = 0; spin_lock(&gl->gl_lockref.lock); add_to_queue(gh); if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) && - test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) { - set_bit(GLF_REPLY_PENDING, &gl->gl_flags); + test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags))) { + set_bit(GLF_HAVE_REPLY, &gl->gl_flags); gl->gl_lockref.count++; - __gfs2_glock_queue_work(gl, 0); + gfs2_glock_queue_work(gl, 0); } run_queue(gl, 1); spin_unlock(&gl->gl_lockref.lock); @@ -1630,12 +1608,6 @@ int gfs2_glock_poll(struct gfs2_holder *gh) return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1; } -static inline bool needs_demote(struct gfs2_glock *gl) -{ - return (test_bit(GLF_DEMOTE, &gl->gl_flags) || - test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); -} - static void __gfs2_glock_dq(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; @@ -1644,11 +1616,11 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh) /* * This holder should not be cached, so mark it for demote. - * Note: this should be done before the check for needs_demote - * below. + * Note: this should be done before the glock_needs_demote + * check below. */ if (gh->gh_flags & GL_NOCACHE) - handle_callback(gl, LM_ST_UNLOCKED, 0, false); + request_demote(gl, LM_ST_UNLOCKED, 0, false); list_del_init(&gh->gh_list); clear_bit(HIF_HOLDER, &gh->gh_iflags); @@ -1658,21 +1630,18 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh) * If there hasn't been a demote request we are done. * (Let the remaining holders, if any, keep holding it.) 
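Further down, gfs2_dispose_glock_lru() is reworked to walk its private dispose list with the global lru_lock held, so it can only ever try-lock a glock; anything that cannot be locked or freed right now is moved back onto the LRU for a later scan. The skeleton of that idiom:

        while (!list_empty(list)) {
                gl = list_first_entry(list, struct gfs2_glock, gl_lru);
                if (!spin_trylock(&gl->gl_lockref.lock)) {
                        list_move(&gl->gl_lru, &lru_list);      /* revisit later */
                        continue;
                }
                if (!can_free_glock(gl)) {
                        spin_unlock(&gl->gl_lockref.lock);
                        list_move(&gl->gl_lru, &lru_list);
                        continue;
                }
                /* ... unlink from the LRU, request a demote to UNLOCKED,
                 * and queue the glock work ... */
                spin_unlock(&gl->gl_lockref.lock);
        }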
*/ - if (!needs_demote(gl)) { + if (!glock_needs_demote(gl)) { if (list_empty(&gl->gl_holders)) fast_path = 1; } - if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) - gfs2_glock_add_to_lru(gl); - if (unlikely(!fast_path)) { gl->gl_lockref.count++; if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && !test_bit(GLF_DEMOTE, &gl->gl_flags) && gl->gl_name.ln_type == LM_TYPE_INODE) delay = gl->gl_hold_time; - __gfs2_glock_queue_work(gl, delay); + gfs2_glock_queue_work(gl, delay); } } @@ -1696,11 +1665,19 @@ void gfs2_glock_dq(struct gfs2_holder *gh) } if (list_is_first(&gh->gh_list, &gl->gl_holders) && - !test_bit(HIF_HOLDER, &gh->gh_iflags)) { + !test_bit(HIF_HOLDER, &gh->gh_iflags) && + test_bit(GLF_LOCK, &gl->gl_flags) && + !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) && + !test_bit(GLF_CANCELING, &gl->gl_flags)) { + set_bit(GLF_CANCELING, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl); wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); spin_lock(&gl->gl_lockref.lock); + clear_bit(GLF_CANCELING, &gl->gl_flags); + clear_bit(GLF_LOCK, &gl->gl_flags); + if (!gfs2_holder_queued(gh)) + goto out; } /* @@ -1882,21 +1859,23 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs) void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state) { unsigned long delay = 0; - unsigned long holdtime; - unsigned long now = jiffies; gfs2_glock_hold(gl); spin_lock(&gl->gl_lockref.lock); - holdtime = gl->gl_tchange + gl->gl_hold_time; if (!list_empty(&gl->gl_holders) && gl->gl_name.ln_type == LM_TYPE_INODE) { + unsigned long now = jiffies; + unsigned long holdtime; + + holdtime = gl->gl_tchange + gl->gl_hold_time; + if (time_before(now, holdtime)) delay = holdtime - now; - if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) + if (test_bit(GLF_HAVE_REPLY, &gl->gl_flags)) delay = gl->gl_hold_time; } - handle_callback(gl, state, delay, true); - __gfs2_glock_queue_work(gl, delay); + request_demote(gl, state, delay, true); + gfs2_glock_queue_work(gl, delay); spin_unlock(&gl->gl_lockref.lock); } @@ -1944,19 +1923,20 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret) struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; spin_lock(&gl->gl_lockref.lock); + clear_bit(GLF_PENDING_REPLY, &gl->gl_flags); gl->gl_reply = ret; if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) { if (gfs2_should_freeze(gl)) { - set_bit(GLF_FROZEN, &gl->gl_flags); + set_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags); spin_unlock(&gl->gl_lockref.lock); return; } } gl->gl_lockref.count++; - set_bit(GLF_REPLY_PENDING, &gl->gl_flags); - __gfs2_glock_queue_work(gl, 0); + set_bit(GLF_HAVE_REPLY, &gl->gl_flags); + gfs2_glock_queue_work(gl, 0); spin_unlock(&gl->gl_lockref.lock); } @@ -1976,6 +1956,16 @@ static int glock_cmp(void *priv, const struct list_head *a, return 0; } +static bool can_free_glock(struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + + return !test_bit(GLF_LOCK, &gl->gl_flags) && + !gl->gl_lockref.count && + (!test_bit(GLF_LFLUSH, &gl->gl_flags) || + test_bit(SDF_KILL, &sdp->sd_flags)); +} + /** * gfs2_dispose_glock_lru - Demote a list of glocks * @list: The list to dispose of @@ -1990,37 +1980,38 @@ static int glock_cmp(void *priv, const struct list_head *a, * private) */ -static void gfs2_dispose_glock_lru(struct list_head *list) +static unsigned long gfs2_dispose_glock_lru(struct list_head *list) __releases(&lru_lock) __acquires(&lru_lock) { struct gfs2_glock *gl; + unsigned long freed = 0; 
list_sort(NULL, list, glock_cmp); while(!list_empty(list)) { gl = list_first_entry(list, struct gfs2_glock, gl_lru); - list_del_init(&gl->gl_lru); - clear_bit(GLF_LRU, &gl->gl_flags); if (!spin_trylock(&gl->gl_lockref.lock)) { add_back_to_lru: - list_add(&gl->gl_lru, &lru_list); - set_bit(GLF_LRU, &gl->gl_flags); - atomic_inc(&lru_count); + list_move(&gl->gl_lru, &lru_list); continue; } - if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { + if (!can_free_glock(gl)) { spin_unlock(&gl->gl_lockref.lock); goto add_back_to_lru; } + list_del_init(&gl->gl_lru); + atomic_dec(&lru_count); + clear_bit(GLF_LRU, &gl->gl_flags); + freed++; gl->gl_lockref.count++; - if (demote_ok(gl)) - handle_callback(gl, LM_ST_UNLOCKED, 0, false); - WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags)); - __gfs2_glock_queue_work(gl, 0); + if (gl->gl_state != LM_ST_UNLOCKED) + request_demote(gl, LM_ST_UNLOCKED, 0, false); + gfs2_glock_queue_work(gl, 0); spin_unlock(&gl->gl_lockref.lock); cond_resched_lock(&lru_lock); } + return freed; } /** @@ -2032,32 +2023,21 @@ add_back_to_lru: * gfs2_dispose_glock_lru() above. */ -static long gfs2_scan_glock_lru(int nr) +static unsigned long gfs2_scan_glock_lru(unsigned long nr) { struct gfs2_glock *gl, *next; LIST_HEAD(dispose); - long freed = 0; + unsigned long freed = 0; spin_lock(&lru_lock); list_for_each_entry_safe(gl, next, &lru_list, gl_lru) { - if (nr-- <= 0) + if (!nr--) break; - /* Test for being demotable */ - if (!test_bit(GLF_LOCK, &gl->gl_flags)) { - if (!spin_trylock(&gl->gl_lockref.lock)) - continue; - if (gl->gl_lockref.count <= 1 && - (gl->gl_state == LM_ST_UNLOCKED || - demote_ok(gl))) { - list_move(&gl->gl_lru, &dispose); - atomic_dec(&lru_count); - freed++; - } - spin_unlock(&gl->gl_lockref.lock); - } + if (can_free_glock(gl)) + list_move(&gl->gl_lru, &dispose); } if (!list_empty(&dispose)) - gfs2_dispose_glock_lru(&dispose); + freed = gfs2_dispose_glock_lru(&dispose); spin_unlock(&lru_lock); return freed; @@ -2113,7 +2093,7 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp) void gfs2_cancel_delete_work(struct gfs2_glock *gl) { clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags); - clear_bit(GLF_VERIFY_EVICT, &gl->gl_flags); + clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags); if (cancel_delayed_work(&gl->gl_delete)) gfs2_glock_put(gl); } @@ -2144,12 +2124,16 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp) static void thaw_glock(struct gfs2_glock *gl) { - if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) + if (!test_and_clear_bit(GLF_HAVE_FROZEN_REPLY, &gl->gl_flags)) return; if (!lockref_get_not_dead(&gl->gl_lockref)) return; - set_bit(GLF_REPLY_PENDING, &gl->gl_flags); + + gfs2_glock_remove_from_lru(gl); + spin_lock(&gl->gl_lockref.lock); + set_bit(GLF_HAVE_REPLY, &gl->gl_flags); gfs2_glock_queue_work(gl, 0); + spin_unlock(&gl->gl_lockref.lock); } /** @@ -2166,8 +2150,8 @@ static void clear_glock(struct gfs2_glock *gl) if (!__lockref_is_dead(&gl->gl_lockref)) { gl->gl_lockref.count++; if (gl->gl_state != LM_ST_UNLOCKED) - handle_callback(gl, LM_ST_UNLOCKED, 0, false); - __gfs2_glock_queue_work(gl, 0); + request_demote(gl, LM_ST_UNLOCKED, 0, false); + gfs2_glock_queue_work(gl, 0); } spin_unlock(&gl->gl_lockref.lock); } @@ -2218,14 +2202,31 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp) void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) { + unsigned long start = jiffies; + bool timed_out = false; + set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); - flush_workqueue(glock_workqueue); + flush_workqueue(sdp->sd_glock_wq); 
glock_hash_walk(clear_glock, sdp); - flush_workqueue(glock_workqueue); - wait_event_timeout(sdp->sd_kill_wait, - atomic_read(&sdp->sd_glock_disposal) == 0, - HZ * 600); + flush_workqueue(sdp->sd_glock_wq); + + while (!timed_out) { + wait_event_timeout(sdp->sd_kill_wait, + !atomic_read(&sdp->sd_glock_disposal), + HZ * 60); + if (!atomic_read(&sdp->sd_glock_disposal)) + break; + timed_out = time_after(jiffies, start + (HZ * 600)); + fs_warn(sdp, "%u glocks left after %u seconds%s\n", + atomic_read(&sdp->sd_glock_disposal), + jiffies_to_msecs(jiffies - start) / 1000, + timed_out ? ":" : "; still waiting"); + } + gfs2_lm_unmount(sdp); + gfs2_free_dead_glocks(sdp); glock_hash_walk(dump_glock_func, sdp); + destroy_workqueue(sdp->sd_glock_wq); + sdp->sd_glock_wq = NULL; } static const char *state2str(unsigned state) @@ -2323,11 +2324,13 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'f'; if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags)) *p++ = 'i'; - if (test_bit(GLF_REPLY_PENDING, gflags)) + if (test_bit(GLF_PENDING_REPLY, gflags)) + *p++ = 'R'; + if (test_bit(GLF_HAVE_REPLY, gflags)) *p++ = 'r'; if (test_bit(GLF_INITIAL, gflags)) - *p++ = 'I'; - if (test_bit(GLF_FROZEN, gflags)) + *p++ = 'a'; + if (test_bit(GLF_HAVE_FROZEN_REPLY, gflags)) *p++ = 'F'; if (!list_empty(&gl->gl_holders)) *p++ = 'q'; @@ -2337,7 +2340,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'o'; if (test_bit(GLF_BLOCKING, gflags)) *p++ = 'b'; - if (test_bit(GLF_FREEING, gflags)) + if (test_bit(GLF_UNLOCKED, gflags)) *p++ = 'x'; if (test_bit(GLF_INSTANTIATE_NEEDED, gflags)) *p++ = 'n'; @@ -2345,8 +2348,12 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'N'; if (test_bit(GLF_TRY_TO_EVICT, gflags)) *p++ = 'e'; - if (test_bit(GLF_VERIFY_EVICT, gflags)) + if (test_bit(GLF_VERIFY_DELETE, gflags)) *p++ = 'E'; + if (test_bit(GLF_DEFER_DELETE, gflags)) + *p++ = 's'; + if (test_bit(GLF_CANCELING, gflags)) + *p++ = 'C'; *p = 0; return buf; } @@ -2490,16 +2497,8 @@ int __init gfs2_glock_init(void) if (ret < 0) return ret; - glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | - WQ_HIGHPRI | WQ_FREEZABLE, 0); - if (!glock_workqueue) { - rhashtable_destroy(&gl_hash_table); - return -ENOMEM; - } - glock_shrinker = shrinker_alloc(0, "gfs2-glock"); if (!glock_shrinker) { - destroy_workqueue(glock_workqueue); rhashtable_destroy(&gl_hash_table); return -ENOMEM; } @@ -2519,7 +2518,6 @@ void gfs2_glock_exit(void) { shrinker_free(glock_shrinker); rhashtable_destroy(&gl_hash_table); - destroy_workqueue(glock_workqueue); } static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) @@ -2529,8 +2527,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n) if (gl) { if (n == 0) return; - if (!lockref_put_not_zero(&gl->gl_lockref)) - gfs2_glock_queue_put(gl); + gfs2_glock_put_async(gl); } for (;;) { gl = rhashtable_walk_next(&gi->hti); @@ -2752,25 +2749,18 @@ static struct file *gfs2_glockfd_next_file(struct gfs2_glockfd_iter *i) i->file = NULL; } - rcu_read_lock(); for(;; i->fd++) { - struct inode *inode; - - i->file = task_lookup_next_fdget_rcu(i->task, &i->fd); + i->file = fget_task_next(i->task, &i->fd); if (!i->file) { i->fd = 0; break; } - inode = file_inode(i->file); - if (inode->i_sb == i->sb) + if (file_inode(i->file)->i_sb == i->sb) break; - rcu_read_unlock(); fput(i->file); - rcu_read_lock(); } - rcu_read_unlock(); return i->file; } diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h index 
0114f3e0ebe0..c171f745650f 100644 --- a/fs/gfs2/glock.h +++ b/fs/gfs2/glock.h @@ -172,7 +172,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, int create, struct gfs2_glock **glp); struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl); void gfs2_glock_put(struct gfs2_glock *gl); -void gfs2_glock_queue_put(struct gfs2_glock *gl); +void gfs2_glock_put_async(struct gfs2_glock *gl); void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags, struct gfs2_holder *gh, @@ -245,13 +245,14 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl, void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state); void gfs2_glock_complete(struct gfs2_glock *gl, int ret); bool gfs2_queue_try_to_evict(struct gfs2_glock *gl); +bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later); void gfs2_cancel_delete_work(struct gfs2_glock *gl); void gfs2_flush_delete_work(struct gfs2_sbd *sdp); void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); void gfs2_gl_dq_holders(struct gfs2_sbd *sdp); void gfs2_glock_thaw(struct gfs2_sbd *sdp); -void gfs2_glock_add_to_lru(struct gfs2_glock *gl); void gfs2_glock_free(struct gfs2_glock *gl); +void gfs2_glock_free_later(struct gfs2_glock *gl); int __init gfs2_glock_init(void); void gfs2_glock_exit(void); @@ -284,4 +285,10 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh) void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation); bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation); +static inline bool glock_needs_demote(struct gfs2_glock *gl) +{ + return (test_bit(GLF_DEMOTE, &gl->gl_flags) || + test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)); +} + #endif /* __GLOCK_DOT_H__ */ diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 45653cbc8a87..cebd66b22694 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); spin_unlock(&sdp->sd_ail_lock); gfs2_log_unlock(sdp); + + if (gfs2_withdrawing(sdp)) + gfs2_withdraw(sdp); } @@ -165,7 +168,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) static int gfs2_rgrp_metasync(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *metamapping = &sdp->sd_aspace; + struct address_space *metamapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; @@ -222,7 +225,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl) static void rgrp_go_inval(struct gfs2_glock *gl, int flags) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *mapping = &sdp->sd_aspace; + struct address_space *mapping = gfs2_aspace(sdp); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); const unsigned bsize = sdp->sd_sb.sb_bsize; loff_t start, end; @@ -382,23 +385,6 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) gfs2_clear_glop_pending(ip); } -/** - * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock - * @gl: the glock - * - * Returns: 1 if it's ok - */ - -static int inode_go_demote_ok(const struct gfs2_glock *gl) -{ - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - - if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) - return 0; - - return 1; -} - static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); @@ -409,10 +395,14 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 
struct inode *inode = &ip->i_inode; bool is_new = inode->i_state & I_NEW; - if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) - goto corrupt; - if (unlikely(!is_new && inode_wrong_type(inode, mode))) - goto corrupt; + if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) { + gfs2_consist_inode(ip); + return -EIO; + } + if (unlikely(!is_new && inode_wrong_type(inode, mode))) { + gfs2_consist_inode(ip); + return -EIO; + } ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); inode->i_mode = mode; if (is_new) { @@ -449,26 +439,28 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */ gfs2_set_inode_flags(inode); height = be16_to_cpu(str->di_height); - if (unlikely(height > sdp->sd_max_height)) - goto corrupt; + if (unlikely(height > sdp->sd_max_height)) { + gfs2_consist_inode(ip); + return -EIO; + } ip->i_height = (u8)height; depth = be16_to_cpu(str->di_depth); - if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) - goto corrupt; + if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) { + gfs2_consist_inode(ip); + return -EIO; + } ip->i_depth = (u8)depth; ip->i_entries = be32_to_cpu(str->di_entries); - if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) - goto corrupt; - + if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) { + gfs2_consist_inode(ip); + return -EIO; + } if (S_ISREG(inode->i_mode)) gfs2_set_aops(inode); return 0; -corrupt: - gfs2_consist_inode(ip); - return -EIO; } /** @@ -478,7 +470,7 @@ corrupt: * Returns: errno */ -int gfs2_inode_refresh(struct gfs2_inode *ip) +static int gfs2_inode_refresh(struct gfs2_inode *ip) { struct buffer_head *dibh; int error; @@ -502,11 +494,18 @@ int gfs2_inode_refresh(struct gfs2_inode *ip) static int inode_go_instantiate(struct gfs2_glock *gl) { struct gfs2_inode *ip = gl->gl_object; + struct gfs2_glock *io_gl; + int error; if (!ip) /* no inode to populate - read it in later */ return 0; - return gfs2_inode_refresh(ip); + error = gfs2_inode_refresh(ip); + if (error) + return error; + io_gl = ip->i_iopen_gh.gh_gl; + io_gl->gl_no_formal_ino = ip->i_no_formal_ino; + return 0; } static int inode_go_held(struct gfs2_holder *gh) @@ -602,14 +601,13 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl) if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); - error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); + error = gfs2_find_jhead(sdp->sd_jdesc, &head); if (gfs2_assert_withdraw_delayed(sdp, !error)) return error; if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) return -EIO; - sdp->sd_log_sequence = head.lh_sequence + 1; - gfs2_log_pointers_init(sdp, head.lh_blkno); + gfs2_log_pointers_init(sdp, &head); } return 0; } @@ -639,21 +637,21 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote) } /** - * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it - * @gl: glock being freed + * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast + * @gl: glock being unlocked * * For now, this is only used for the journal inode glock. In withdraw - * situations, we need to wait for the glock to be freed so that we know + * situations, we need to wait for the glock to be unlocked so that we know * other nodes may proceed with recovery / journal replay. 
*/ -static void inode_go_free(struct gfs2_glock *gl) +static void inode_go_unlocked(struct gfs2_glock *gl) { /* Note that we cannot reference gl_object because it's already set * to NULL by this point in its lifecycle. */ - if (!test_bit(GLF_FREEING, &gl->gl_flags)) + if (!test_bit(GLF_UNLOCKED, &gl->gl_flags)) return; - clear_bit_unlock(GLF_FREEING, &gl->gl_flags); - wake_up_bit(&gl->gl_flags, GLF_FREEING); + clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags); + wake_up_bit(&gl->gl_flags, GLF_UNLOCKED); } /** @@ -713,13 +711,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = { const struct gfs2_glock_operations gfs2_inode_glops = { .go_sync = inode_go_sync, .go_inval = inode_go_inval, - .go_demote_ok = inode_go_demote_ok, .go_instantiate = inode_go_instantiate, .go_held = inode_go_held, .go_dump = inode_go_dump, .go_type = LM_TYPE_INODE, - .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB, - .go_free = inode_go_free, + .go_flags = GLOF_ASPACE | GLOF_LVB, + .go_unlocked = inode_go_unlocked, }; const struct gfs2_glock_operations gfs2_rgrp_glops = { @@ -742,13 +739,13 @@ const struct gfs2_glock_operations gfs2_iopen_glops = { .go_type = LM_TYPE_IOPEN, .go_callback = iopen_go_callback, .go_dump = inode_go_dump, - .go_flags = GLOF_LRU | GLOF_NONDISK, + .go_flags = GLOF_NONDISK, .go_subclass = 1, }; const struct gfs2_glock_operations gfs2_flock_glops = { .go_type = LM_TYPE_FLOCK, - .go_flags = GLOF_LRU | GLOF_NONDISK, + .go_flags = GLOF_NONDISK, }; const struct gfs2_glock_operations gfs2_nondisk_glops = { @@ -759,7 +756,7 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = { const struct gfs2_glock_operations gfs2_quota_glops = { .go_type = LM_TYPE_QUOTA, - .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK, + .go_flags = GLOF_LVB | GLOF_NONDISK, }; const struct gfs2_glock_operations gfs2_journal_glops = { diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 95a334d64da2..0a41c4e76b32 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h @@ -218,19 +218,17 @@ struct gfs2_glock_operations { int (*go_sync) (struct gfs2_glock *gl); int (*go_xmote_bh)(struct gfs2_glock *gl); void (*go_inval) (struct gfs2_glock *gl, int flags); - int (*go_demote_ok) (const struct gfs2_glock *gl); int (*go_instantiate) (struct gfs2_glock *gl); int (*go_held)(struct gfs2_holder *gh); void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl, const char *fs_id_buf); void (*go_callback)(struct gfs2_glock *gl, bool remote); - void (*go_free)(struct gfs2_glock *gl); + void (*go_unlocked)(struct gfs2_glock *gl); const int go_subclass; const int go_type; const unsigned long go_flags; #define GLOF_ASPACE 1 /* address space attached */ #define GLOF_LVB 2 /* Lock Value Block attached */ -#define GLOF_LRU 4 /* LRU managed */ #define GLOF_NONDISK 8 /* not I/O related */ }; @@ -322,16 +320,19 @@ enum { GLF_DIRTY = 6, GLF_LFLUSH = 7, GLF_INVALIDATE_IN_PROGRESS = 8, - GLF_REPLY_PENDING = 9, + GLF_HAVE_REPLY = 9, GLF_INITIAL = 10, - GLF_FROZEN = 11, + GLF_HAVE_FROZEN_REPLY = 11, GLF_INSTANTIATE_IN_PROG = 12, /* instantiate happening now */ GLF_LRU = 13, GLF_OBJECT = 14, /* Used only for tracing */ GLF_BLOCKING = 15, - GLF_FREEING = 16, /* Wait for glock to be freed */ + GLF_UNLOCKED = 16, /* Wait for glock to be unlocked */ GLF_TRY_TO_EVICT = 17, /* iopen glocks only */ - GLF_VERIFY_EVICT = 18, /* iopen glocks only */ + GLF_VERIFY_DELETE = 18, /* iopen glocks only */ + GLF_PENDING_REPLY = 19, + GLF_DEFER_DELETE = 20, /* iopen glocks only */ + GLF_CANCELING = 21, }; struct gfs2_glock { @@ -378,7 +379,6 @@ enum { 
GIF_SW_PAGED = 3, GIF_FREE_VFS_INODE = 5, GIF_GLOP_PENDING = 6, - GIF_DEFERRED_DELETE = 7, }; struct gfs2_inode { @@ -772,6 +772,7 @@ struct gfs2_sbd { /* Workqueue stuff */ + struct workqueue_struct *sd_glock_wq; struct workqueue_struct *sd_delete_wq; /* Daemon stuff */ @@ -783,7 +784,6 @@ struct gfs2_sbd { struct list_head sd_quota_list; atomic_t sd_quota_count; - struct mutex sd_quota_mutex; struct mutex sd_quota_sync_mutex; wait_queue_head_t sd_quota_wait; @@ -795,7 +795,7 @@ struct gfs2_sbd { /* Log stuff */ - struct address_space sd_aspace; + struct inode *sd_inode; spinlock_t sd_log_lock; @@ -838,6 +838,7 @@ struct gfs2_sbd { /* For quiescing the filesystem */ struct gfs2_holder sd_freeze_gh; struct mutex sd_freeze_mutex; + struct list_head sd_dead_glocks; char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2]; char sd_table_name[GFS2_FSNAME_LEN]; @@ -850,6 +851,13 @@ struct gfs2_sbd { unsigned long sd_glock_dqs_held; }; +#define GFS2_BAD_INO 1 + +static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp) +{ + return sdp->sd_inode->i_mapping; +} + static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) { gl->gl_stats.stats[which]++; diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 1b95db2c3aac..187d789a8f1e 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -439,6 +439,74 @@ out: return error; } +static void gfs2_final_release_pages(struct gfs2_inode *ip) +{ + struct inode *inode = &ip->i_inode; + struct gfs2_glock *gl = ip->i_gl; + + if (unlikely(!gl)) { + /* This can only happen during incomplete inode creation. */ + BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); + return; + } + + truncate_inode_pages(gfs2_glock2aspace(gl), 0); + truncate_inode_pages(&inode->i_data, 0); + + if (atomic_read(&gl->gl_revokes) == 0) { + clear_bit(GLF_LFLUSH, &gl->gl_flags); + clear_bit(GLF_DIRTY, &gl->gl_flags); + } +} + +int gfs2_dinode_dealloc(struct gfs2_inode *ip) +{ + struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); + struct gfs2_rgrpd *rgd; + struct gfs2_holder gh; + int error; + + if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { + gfs2_consist_inode(ip); + return -EIO; + } + + gfs2_rindex_update(sdp); + + error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); + if (error) + return error; + + rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); + if (!rgd) { + gfs2_consist_inode(ip); + error = -EIO; + goto out_qs; + } + + error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, + LM_FLAG_NODE_SCOPE, &gh); + if (error) + goto out_qs; + + error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, + sdp->sd_jdesc->jd_blocks); + if (error) + goto out_rg_gunlock; + + gfs2_free_di(rgd, ip); + + gfs2_final_release_pages(ip); + + gfs2_trans_end(sdp); + +out_rg_gunlock: + gfs2_glock_dq_uninit(&gh); +out_qs: + gfs2_quota_unhold(ip); + return error; +} + static void gfs2_init_dir(struct buffer_head *dibh, const struct gfs2_inode *parent) { @@ -629,10 +697,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; - int error; + int error, dealloc_error; u32 aflags = 0; unsigned blocks = 1; struct gfs2_diradd da = { .bh = NULL, .save_loc = 1, }; + bool xattr_initialized = false; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; @@ -659,7 +728,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (!IS_ERR(inode)) { if (S_ISDIR(inode->i_mode)) { iput(inode); - inode = ERR_PTR(-EISDIR); + 
inode = NULL; + error = -EISDIR; goto fail_gunlock; } d_instantiate(dentry, inode); @@ -744,12 +814,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) - goto fail_free_inode; + goto fail_dealloc_inode; gfs2_cancel_delete_work(io_gl); + io_gl->gl_no_formal_ino = ip->i_no_formal_ino; retry: error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr); @@ -766,13 +837,16 @@ retry: error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh); if (error) goto fail_gunlock3; + clear_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags); error = gfs2_trans_begin(sdp, blocks, 0); if (error) goto fail_gunlock3; - if (blocks > 1) + if (blocks > 1) { gfs2_init_xattr(ip); + xattr_initialized = true; + } init_dinode(dip, ip, symname); gfs2_trans_end(sdp); @@ -827,6 +901,18 @@ fail_gunlock3: gfs2_glock_dq_uninit(&ip->i_iopen_gh); fail_gunlock2: gfs2_glock_put(io_gl); +fail_dealloc_inode: + set_bit(GIF_ALLOC_FAILED, &ip->i_flags); + dealloc_error = 0; + if (ip->i_eattr) + dealloc_error = gfs2_ea_dealloc(ip, xattr_initialized); + clear_nlink(inode); + mark_inode_dirty(inode); + if (!dealloc_error) + dealloc_error = gfs2_dinode_dealloc(ip); + if (dealloc_error) + fs_warn(sdp, "%s: %d\n", __func__, dealloc_error); + ip->i_no_addr = 0; fail_free_inode: if (ip->i_gl) { gfs2_glock_put(ip->i_gl); @@ -841,10 +927,6 @@ fail_gunlock: gfs2_dir_no_add(&da); gfs2_glock_dq_uninit(&d_gh); if (!IS_ERR_OR_NULL(inode)) { - set_bit(GIF_ALLOC_FAILED, &ip->i_flags); - clear_nlink(inode); - if (ip->i_no_addr) - mark_inode_dirty(inode); if (inode->i_state & I_NEW) iget_failed(inode); else @@ -1247,14 +1329,15 @@ static int gfs2_symlink(struct mnt_idmap *idmap, struct inode *dir, * @dentry: The dentry of the new directory * @mode: The mode of the new directory * - * Returns: errno + * Returns: the dentry, or ERR_PTR(errno) */ -static int gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir, - struct dentry *dentry, umode_t mode) +static struct dentry *gfs2_mkdir(struct mnt_idmap *idmap, struct inode *dir, + struct dentry *dentry, umode_t mode) { unsigned dsize = gfs2_max_stuffed_size(GFS2_I(dir)); - return gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0); + + return ERR_PTR(gfs2_create_inode(dir, dentry, NULL, S_IFDIR | mode, 0, NULL, dsize, 0)); } /** diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index fd15d1c6b6fb..eafe123617e6 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h @@ -92,8 +92,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr, u64 no_formal_ino, unsigned int blktype); - -int gfs2_inode_refresh(struct gfs2_inode *ip); +int gfs2_dinode_dealloc(struct gfs2_inode *ip); struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, int is_root); diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index d1ac5d0679ea..7cb9d216d8bb 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -121,6 +121,11 @@ static void gdlm_ast(void *arg) struct gfs2_glock *gl = arg; unsigned ret = gl->gl_state; + /* If the glock is dead, we only react to a dlm_unlock() reply. 
*/ + if (__lockref_is_dead(&gl->gl_lockref) && + gl->gl_lksb.sb_status != -DLM_EUNLOCK) + return; + gfs2_update_reply_times(gl); BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); @@ -129,8 +134,8 @@ static void gdlm_ast(void *arg) switch (gl->gl_lksb.sb_status) { case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ - if (gl->gl_ops->go_free) - gl->gl_ops->go_free(gl); + if (gl->gl_ops->go_unlocked) + gl->gl_ops->go_unlocked(gl); gfs2_glock_free(gl); return; case -DLM_ECANCEL: /* Cancel while getting lock */ @@ -158,11 +163,21 @@ static void gdlm_ast(void *arg) BUG(); } - set_bit(GLF_INITIAL, &gl->gl_flags); + /* + * The GLF_INITIAL flag is initially set for new glocks. Upon the + * first successful new (non-conversion) request, we clear this flag to + * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the + * identifier to use for identifying it. + * + * Any failed initial requests do not create a DLM lock, so we ignore + * the gl->gl_lksb.sb_lkid values that come with such requests. + */ + + clear_bit(GLF_INITIAL, &gl->gl_flags); gfs2_glock_complete(gl, ret); return; out: - if (!test_bit(GLF_INITIAL, &gl->gl_flags)) + if (test_bit(GLF_INITIAL, &gl->gl_flags)) gl->gl_lksb.sb_lkid = 0; gfs2_glock_complete(gl, ret); } @@ -171,6 +186,9 @@ static void gdlm_bast(void *arg, int mode) { struct gfs2_glock *gl = arg; + if (__lockref_is_dead(&gl->gl_lockref)) + return; + switch (mode) { case DLM_LOCK_EX: gfs2_glock_cb(gl, LM_ST_UNLOCKED); @@ -206,8 +224,21 @@ static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate) return -1; } +/* Taken from fs/dlm/lock.c. */ + +static bool middle_conversion(int cur, int req) +{ + return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) || + (cur == DLM_LOCK_CW && req == DLM_LOCK_PR); +} + +static bool down_conversion(int cur, int req) +{ + return !middle_conversion(cur, req) && req < cur; +} + static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags, - const int req) + const int cur, const int req) { u32 lkf = 0; @@ -231,9 +262,16 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags, BUG(); } - if (gl->gl_lksb.sb_lkid != 0) { + if (!test_bit(GLF_INITIAL, &gl->gl_flags)) { lkf |= DLM_LKF_CONVERT; - if (test_bit(GLF_BLOCKING, &gl->gl_flags)) + + /* + * The DLM_LKF_QUECVT flag needs to be set for "first come, + * first served" semantics, but it must only be set for + * "upward" lock conversions or else DLM will reject the + * request as invalid. + */ + if (!down_conversion(cur, req)) lkf |= DLM_LKF_QUECVT; } @@ -253,23 +291,24 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, unsigned int flags) { struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct; - int req; + int cur, req; u32 lkf; char strname[GDLM_STRNAME_BYTES] = ""; int error; + cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state); req = make_mode(gl->gl_name.ln_sbd, req_state); - lkf = make_flags(gl, flags, req); + lkf = make_flags(gl, flags, cur, req); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); - if (gl->gl_lksb.sb_lkid) { - gfs2_update_request_times(gl); - } else { + if (test_bit(GLF_INITIAL, &gl->gl_flags)) { memset(strname, ' ', GDLM_STRNAME_BYTES - 1); strname[GDLM_STRNAME_BYTES - 1] = '\0'; gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type); gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number); gl->gl_dstamp = ktime_get_real(); + } else { + gfs2_update_request_times(gl); } /* * Submit the actual lock request. 
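/*
 * Illustration, not part of the patch: DLM orders its lock modes by
 * numeric value (NL=0, CR=1, CW=2, PR=3, PW=4, EX=5), except that CW and
 * PR are mutually incompatible siblings rather than ordered modes, so a
 * CW <-> PR conversion counts as "middle" and must be treated as upward.
 * A minimal, self-contained sketch of how the middle_conversion() and
 * down_conversion() helpers added above classify a few conversions:
 */
#include <assert.h>

enum { DLM_LOCK_NL, DLM_LOCK_CR, DLM_LOCK_CW,
       DLM_LOCK_PR, DLM_LOCK_PW, DLM_LOCK_EX };

static int middle_conversion(int cur, int req)
{
	return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
	       (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
}

static int down_conversion(int cur, int req)
{
	return !middle_conversion(cur, req) && req < cur;
}

int main(void)
{
	assert(down_conversion(DLM_LOCK_EX, DLM_LOCK_PR));  /* down: no DLM_LKF_QUECVT */
	assert(!down_conversion(DLM_LOCK_PR, DLM_LOCK_EX)); /* up: DLM_LKF_QUECVT is set */
	assert(!down_conversion(DLM_LOCK_CW, DLM_LOCK_PR)); /* middle: treated as upward */
	return 0;
}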
@@ -289,10 +328,15 @@ static void gdlm_put_lock(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct lm_lockstruct *ls = &sdp->sd_lockstruct; + uint32_t flags = 0; int error; - if (gl->gl_lksb.sb_lkid == 0) - goto out_free; + BUG_ON(!__lockref_is_dead(&gl->gl_lockref)); + + if (test_bit(GLF_INITIAL, &gl->gl_flags)) { + gfs2_glock_free(gl); + return; + } clear_bit(GLF_BLOCKING, &gl->gl_flags); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); @@ -300,16 +344,29 @@ static void gdlm_put_lock(struct gfs2_glock *gl) gfs2_update_request_times(gl); /* don't want to call dlm if we've unmounted the lock protocol */ - if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) - goto out_free; - /* don't want to skip dlm_unlock writing the lvb when lock has one */ + if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) { + gfs2_glock_free(gl); + return; + } + + /* + * When the lockspace is released, all remaining glocks will be + * unlocked automatically. This is more efficient than unlocking them + * individually, but when the lock is held in DLM_LOCK_EX or + * DLM_LOCK_PW mode, the lock value block (LVB) would be lost. + */ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && - !gl->gl_lksb.sb_lvbptr) - goto out_free; + (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) { + gfs2_glock_free_later(gl); + return; + } + + if (gl->gl_lksb.sb_lvbptr) + flags |= DLM_LKF_VALBLK; again: - error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, + error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags, NULL, gl); if (error == -EBUSY) { msleep(20); @@ -321,10 +378,6 @@ again: gl->gl_name.ln_type, (unsigned long long)gl->gl_name.ln_number, error); } - return; - -out_free: - gfs2_glock_free(gl); } static void gdlm_cancel(struct gfs2_glock *gl) @@ -947,14 +1000,15 @@ locks_done: if (sdp->sd_args.ar_spectator) { fs_info(sdp, "Recovery is required. 
Waiting for a " "non-spectator to mount.\n"); + spin_unlock(&ls->ls_recover_spin); msleep_interruptible(1000); } else { fs_info(sdp, "control_mount wait1 block %u start %u " "mount %u lvb %u flags %lx\n", block_gen, start_gen, mount_gen, lvb_gen, ls->ls_recover_flags); + spin_unlock(&ls->ls_recover_spin); } - spin_unlock(&ls->ls_recover_spin); goto restart; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 8cddf955ebc0..115c4ac457e9 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -31,6 +31,7 @@ #include "dir.h" #include "trace_gfs2.h" #include "trans.h" +#include "aops.h" static void gfs2_log_shutdown(struct gfs2_sbd *sdp); @@ -80,15 +81,6 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd) brelse(bd->bd_bh); } -static int __gfs2_writepage(struct folio *folio, struct writeback_control *wbc, - void *data) -{ - struct address_space *mapping = data; - int ret = mapping->a_ops->writepage(&folio->page, wbc); - mapping_set_error(mapping, ret); - return ret; -} - /** * gfs2_ail1_start_one - Start I/O on a transaction * @sdp: The superblock @@ -140,7 +132,11 @@ __acquires(&sdp->sd_ail_lock) if (!mapping) continue; spin_unlock(&sdp->sd_ail_lock); - ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping); + BUG_ON(GFS2_SB(mapping->host) != sdp); + if (gfs2_is_jdata(GFS2_I(mapping->host))) + ret = gfs2_jdata_writeback(mapping, wbc); + else + ret = mapping->a_ops->writepages(mapping, wbc); if (need_resched()) { blk_finish_plug(plug); cond_resched(); @@ -149,6 +145,7 @@ __acquires(&sdp->sd_ail_lock) spin_lock(&sdp->sd_ail_lock); if (ret == -ENODATA) /* if a jdata write into a new hole */ ret = 0; /* ignore it */ + mapping_set_error(mapping, ret); if (ret || wbc->nr_to_write <= 0) break; return -EBUSY; @@ -786,7 +783,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl) { if (atomic_dec_return(&gl->gl_revokes) == 0) { clear_bit(GLF_LFLUSH, &gl->gl_flags); - gfs2_glock_queue_put(gl); + gfs2_glock_put_async(gl); } } @@ -1108,7 +1105,8 @@ repeat: lops_before_commit(sdp, tr); if (gfs2_withdrawing_or_withdrawn(sdp)) goto out_withdraw; - gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE); + if (sdp->sd_jdesc) + gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE); if (gfs2_withdrawing_or_withdrawn(sdp)) goto out_withdraw; diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index c27b05099c1e..fc30ebdad83a 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -44,17 +44,6 @@ __releases(&sdp->sd_log_lock) spin_unlock(&sdp->sd_log_lock); } -static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp, - unsigned int value) -{ - if (++value == sdp->sd_jdesc->jd_blocks) { - value = 0; - } - sdp->sd_log_tail = value; - sdp->sd_log_flush_tail = value; - sdp->sd_log_head = value; -} - static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 314ec2a70167..9c8c305a75c4 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -157,7 +157,9 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock) /** * gfs2_end_log_write_bh - end log write of pagecache data with buffers * @sdp: The superblock - * @bvec: The bio_vec + * @folio: The folio + * @offset: The first byte within the folio that completed + * @size: The number of bytes that completed * @error: The i/o status * * This finds the relevant buffers and unlocks them and sets the @@ -166,17 +168,13 @@ u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock) * that is pinned in the pagecache. 
*/ -static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, - struct bio_vec *bvec, - blk_status_t error) +static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio, + size_t offset, size_t size, blk_status_t error) { struct buffer_head *bh, *next; - struct page *page = bvec->bv_page; - unsigned size; - bh = page_buffers(page); - size = bvec->bv_len; - while (bh_offset(bh) < bvec->bv_offset) + bh = folio_buffers(folio); + while (bh_offset(bh) < offset) bh = bh->b_this_page; do { if (error) @@ -186,7 +184,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, size -= bh->b_size; brelse(bh); bh = next; - } while(bh && size); + } while (bh && size); } /** @@ -203,13 +201,14 @@ static void gfs2_end_log_write(struct bio *bio) { struct gfs2_sbd *sdp = bio->bi_private; struct bio_vec *bvec; - struct page *page; struct bvec_iter_all iter_all; if (bio->bi_status) { - if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status)) + int err = blk_status_to_errno(bio->bi_status); + + if (!cmpxchg(&sdp->sd_log_error, 0, err)) fs_err(sdp, "Error %d writing to journal, jid=%u\n", - bio->bi_status, sdp->sd_jdesc->jd_jid); + err, sdp->sd_jdesc->jd_jid); gfs2_withdraw_delayed(sdp); /* prevent more writes to the journal */ clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); @@ -217,9 +216,12 @@ static void gfs2_end_log_write(struct bio *bio) } bio_for_each_segment_all(bvec, bio, iter_all) { - page = bvec->bv_page; - if (page_has_buffers(page)) - gfs2_end_log_write_bh(sdp, bvec, bio->bi_status); + struct page *page = bvec->bv_page; + struct folio *folio = page_folio(page); + + if (folio && folio_buffers(folio)) + gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset, + bvec->bv_len, bio->bi_status); else mempool_free(page, gfs2_page_pool); } @@ -359,8 +361,8 @@ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head); gfs2_log_incr_head(sdp); - gfs2_log_write(sdp, sdp->sd_jdesc, bh->b_page, bh->b_size, - bh_offset(bh), dblock); + gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0), + bh->b_size, bh_offset(bh), dblock); } /** @@ -406,17 +408,16 @@ static void gfs2_end_log_read(struct bio *bio) } /** - * gfs2_jhead_pg_srch - Look for the journal head in a given page. + * gfs2_jhead_folio_search - Look for the journal head in a given page. * @jd: The journal descriptor * @head: The journal head to start from - * @page: The page to look in + * @folio: The folio to look in * * Returns: 1 if found, 0 otherwise. */ - -static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, - struct gfs2_log_header_host *head, - struct page *page) +static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd, + struct gfs2_log_header_host *head, + struct folio *folio) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_log_header_host lh; @@ -424,7 +425,8 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, unsigned int offset; bool ret = false; - kaddr = kmap_local_page(page); + VM_BUG_ON_FOLIO(folio_test_large(folio), folio); + kaddr = kmap_local_folio(folio, 0); for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { if (lh.lh_sequence >= head->lh_sequence) @@ -449,7 +451,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, * Find the folio with 'index' in the journal's mapping. Search the folio for * the journal head if requested (cleanup == false). Release refs on the * folio so the page cache can reclaim it. 
We grabbed a - * reference on this folio twice, first when we did a grab_cache_page() + * reference on this folio twice, first when we did a filemap_grab_folio() * to obtain the folio to add it to the bio and second when we do a * filemap_get_folio() here to get the folio to wait on while I/O on it is being * completed. @@ -472,9 +474,9 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, *done = true; if (!*done) - *done = gfs2_jhead_pg_srch(jd, head, &folio->page); + *done = gfs2_jhead_folio_search(jd, head, folio); - /* filemap_get_folio() and the earlier grab_cache_page() */ + /* filemap_get_folio() and the earlier filemap_grab_folio() */ folio_put_refs(folio, 2); } @@ -494,15 +496,13 @@ static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs) * gfs2_find_jhead - find the head of a log * @jd: The journal descriptor * @head: The log descriptor for the head of the log is returned here - * @keep_cache: If set inode pages will not be truncated * * Do a search of a journal by reading it in large chunks using bios and find * the valid log entry with the highest sequence number. (i.e. the log head) * * Returns: 0 on success, errno otherwise */ -int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, - bool keep_cache) +int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct address_space *mapping = jd->jd_inode->i_mapping; @@ -512,9 +512,9 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, unsigned int shift = PAGE_SHIFT - bsize_shift; unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; struct gfs2_journal_extent *je; - int sz, ret = 0; + int ret = 0; struct bio *bio = NULL; - struct page *page = NULL; + struct folio *folio = NULL; bool done = false; errseq_t since; @@ -527,10 +527,11 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, u64 dblock = je->dblock; for (; block < je->lblock + je->blocks; block++, dblock++) { - if (!page) { - page = grab_cache_page(mapping, block >> shift); - if (!page) { - ret = -ENOMEM; + if (!folio) { + folio = filemap_grab_folio(mapping, + block >> shift); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); done = true; goto out; } @@ -541,8 +542,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, sector_t sector = dblock << sdp->sd_fsb2bb_shift; if (bio_end_sector(bio) == sector) { - sz = bio_add_page(bio, page, bsize, off); - if (sz == bsize) + if (bio_add_folio(bio, folio, bsize, off)) goto block_added; } if (off) { @@ -562,12 +562,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); bio->bi_opf = REQ_OP_READ; add_block_to_new_bio: - sz = bio_add_page(bio, page, bsize, off); - BUG_ON(sz != bsize); + if (!bio_add_folio(bio, folio, bsize, off)) + BUG(); block_added: off += bsize; - if (off == PAGE_SIZE) - page = NULL; + if (off == folio_size(folio)) + folio = NULL; if (blocks_submitted <= blocks_read + max_blocks) { /* Keep at least one bio in flight */ continue; @@ -591,8 +591,7 @@ out: if (!ret) ret = filemap_check_wb_err(mapping, since); - if (!keep_cache) - truncate_inode_pages(mapping, 0); + truncate_inode_pages(mapping, 0); return ret; } @@ -615,15 +614,13 @@ static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, static void gfs2_check_magic(struct buffer_head *bh) { - void *kaddr; __be32 *ptr; 
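/*
 * Annotation, not part of the patch: unlike kmap_local_page(), which maps
 * the page and returns its base address, kmap_local_folio() takes a byte
 * offset into the folio and returns a pointer at that offset. That is why
 * the separate "kaddr + bh_offset(bh)" step disappears below and
 * kunmap_local() is passed ptr directly.
 */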
clear_buffer_escaped(bh); - kaddr = kmap_local_page(bh->b_page); - ptr = kaddr + bh_offset(bh); + ptr = kmap_local_folio(bh->b_folio, bh_offset(bh)); if (*ptr == cpu_to_be32(GFS2_MAGIC)) set_buffer_escaped(bh); - kunmap_local(kaddr); + kunmap_local(ptr); } static int blocknr_cmp(void *priv, const struct list_head *a, diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 07890c7b145d..be740bf33666 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -20,7 +20,7 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf); void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); int gfs2_find_jhead(struct gfs2_jdesc *jd, - struct gfs2_log_header_host *head, bool keep_cache); + struct gfs2_log_header_host *head); void gfs2_drain_revokes(struct gfs2_sbd *sdp); static inline unsigned int buf_limit(struct gfs2_sbd *sdp) diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 04cadc02e5a6..0727f60ad028 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -51,7 +51,6 @@ static void gfs2_init_glock_once(void *foo) { struct gfs2_glock *gl = foo; - spin_lock_init(&gl->gl_lockref.lock); INIT_LIST_HEAD(&gl->gl_holders); INIT_LIST_HEAD(&gl->gl_lru); INIT_LIST_HEAD(&gl->gl_ail_list); diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index f814054c8cd0..9dc8885c95d0 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -30,16 +30,16 @@ #include "util.h" #include "trace_gfs2.h" -static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc) +static void gfs2_aspace_write_folio(struct folio *folio, + struct writeback_control *wbc) { struct buffer_head *bh, *head; int nr_underway = 0; blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc); - BUG_ON(!PageLocked(page)); - BUG_ON(!page_has_buffers(page)); + BUG_ON(!folio_test_locked(folio)); - head = page_buffers(page); + head = folio_buffers(folio); bh = head; do { @@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb if (wbc->sync_mode != WB_SYNC_NONE) { lock_buffer(bh); } else if (!trylock_buffer(bh)) { - redirty_page_for_writepage(wbc, page); + folio_redirty_for_writepage(wbc, folio); continue; } if (test_clear_buffer_dirty(bh)) { @@ -66,11 +66,11 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb } while ((bh = bh->b_this_page) != head); /* - * The page and its buffers are protected by PageWriteback(), so we can - * drop the bh refcounts early. + * The folio and its buffers are protected from truncation by + * the writeback flag, so we can drop the bh refcounts early. 
*/ - BUG_ON(PageWriteback(page)); - set_page_writeback(page); + BUG_ON(folio_test_writeback(folio)); + folio_start_writeback(folio); do { struct buffer_head *next = bh->b_this_page; @@ -80,25 +80,35 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb } bh = next; } while (bh != head); - unlock_page(page); + folio_unlock(folio); if (nr_underway == 0) - end_page_writeback(page); + folio_end_writeback(folio); +} - return 0; +static int gfs2_aspace_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct folio *folio = NULL; + int error; + + while ((folio = writeback_iter(mapping, wbc, folio, &error))) + gfs2_aspace_write_folio(folio, wbc); + + return error; } const struct address_space_operations gfs2_meta_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .writepage = gfs2_aspace_writepage, + .writepages = gfs2_aspace_writepages, .release_folio = gfs2_release_folio, }; const struct address_space_operations gfs2_rgrp_aops = { .dirty_folio = block_dirty_folio, .invalidate_folio = block_invalidate_folio, - .writepage = gfs2_aspace_writepage, + .writepages = gfs2_aspace_writepages, .release_folio = gfs2_release_folio, }; @@ -122,7 +132,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) unsigned int bufnum; if (mapping == NULL) - mapping = &sdp->sd_aspace; + mapping = gfs2_aspace(sdp); shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ @@ -188,15 +198,14 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno) static void gfs2_meta_read_endio(struct bio *bio) { - struct bio_vec *bvec; - struct bvec_iter_all iter_all; + struct folio_iter fi; - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - struct buffer_head *bh = page_buffers(page); - unsigned int len = bvec->bv_len; + bio_for_each_folio_all(fi, bio) { + struct folio *folio = fi.folio; + struct buffer_head *bh = folio_buffers(folio); + size_t len = fi.length; - while (bh_offset(bh) < bvec->bv_offset) + while (bh_offset(bh) < fi.offset) bh = bh->b_this_page; do { struct buffer_head *next = bh->b_this_page; @@ -222,7 +231,7 @@ static void gfs2_submit_bhs(blk_opf_t opf, struct buffer_head *bhs[], int num) bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); while (num > 0) { bh = *bhs; - if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) { + if (!bio_add_folio(bio, bh->b_folio, bh->b_size, bh_offset(bh))) { BUG_ON(bio->bi_iter.bi_size == 0); break; } diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index 831d988c2ceb..b7c8a6684d02 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -44,9 +44,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) struct gfs2_glock_aspace *gla = container_of(mapping, struct gfs2_glock_aspace, mapping); return gla->glock.gl_name.ln_sbd; - } else if (mapping->a_ops == &gfs2_rgrp_aops) - return container_of(mapping, struct gfs2_sbd, sd_aspace); - else + } else return inode->i_sb->s_fs_info; } diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 572d58e86296..653f0ff4b057 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -64,15 +64,13 @@ static void gfs2_tune_init(struct gfs2_tune *gt) void free_sbd(struct gfs2_sbd *sdp) { - if (sdp->sd_lkstats) - free_percpu(sdp->sd_lkstats); + free_percpu(sdp->sd_lkstats); kfree(sdp); } static struct gfs2_sbd *init_sbd(struct super_block *sb) { struct gfs2_sbd *sdp; - struct 
address_space *mapping; sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL); if (!sdp) @@ -103,23 +101,12 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) init_completion(&sdp->sd_journal_ready); INIT_LIST_HEAD(&sdp->sd_quota_list); - mutex_init(&sdp->sd_quota_mutex); mutex_init(&sdp->sd_quota_sync_mutex); init_waitqueue_head(&sdp->sd_quota_wait); spin_lock_init(&sdp->sd_bitmap_lock); INIT_LIST_HEAD(&sdp->sd_sc_inodes_list); - mapping = &sdp->sd_aspace; - - address_space_init_once(mapping); - mapping->a_ops = &gfs2_rgrp_aops; - mapping->host = sb->s_bdev->bd_inode; - mapping->flags = 0; - mapping_set_gfp_mask(mapping, GFP_NOFS); - mapping->i_private_data = NULL; - mapping->writeback_index = 0; - spin_lock_init(&sdp->sd_log_lock); atomic_set(&sdp->sd_log_pinned, 0); INIT_LIST_HEAD(&sdp->sd_log_revokes); @@ -136,6 +123,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) atomic_set(&sdp->sd_log_in_flight, 0); init_waitqueue_head(&sdp->sd_log_flush_wait); mutex_init(&sdp->sd_freeze_mutex); + INIT_LIST_HEAD(&sdp->sd_dead_glocks); return sdp; @@ -184,22 +172,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent) return 0; } -static void end_bio_io_page(struct bio *bio) -{ - struct page *page = bio->bi_private; - - if (!bio->bi_status) - SetPageUptodate(page); - else - pr_warn("error %d reading superblock\n", bio->bi_status); - unlock_page(page); -} - -static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) +static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str) { struct gfs2_sb_host *sb = &sdp->sd_sb; struct super_block *s = sdp->sd_vfs; - const struct gfs2_sb *str = buf; sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic); sb->sb_type = be32_to_cpu(str->sb_header.mh_type); @@ -238,36 +214,22 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf) static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent) { - struct super_block *sb = sdp->sd_vfs; - struct gfs2_sb *p; - struct page *page; - struct bio *bio; + struct gfs2_sb *sb; + int err; - page = alloc_page(GFP_NOFS); - if (unlikely(!page)) + sb = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (unlikely(!sb)) return -ENOMEM; - - ClearPageUptodate(page); - ClearPageDirty(page); - lock_page(page); - - bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS); - bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); - __bio_add_page(bio, page, PAGE_SIZE, 0); - - bio->bi_end_io = end_bio_io_page; - bio->bi_private = page; - submit_bio(bio); - wait_on_page_locked(page); - bio_put(bio); - if (!PageUptodate(page)) { - __free_page(page); - return -EIO; - } - p = kmap(page); - gfs2_sb_in(sdp, p); - kunmap(page); - __free_page(page); + err = bdev_rw_virt(sdp->sd_vfs->s_bdev, + sector * (sdp->sd_vfs->s_blocksize >> 9), sb, PAGE_SIZE, + REQ_OP_READ | REQ_META); + if (err) { + pr_warn("error %d reading superblock\n", err); + kfree(sb); + return err; + } + gfs2_sb_in(sdp, sb); + kfree(sb); return gfs2_check_sb(sdp, silent); } @@ -520,7 +482,9 @@ static int init_sb(struct gfs2_sbd *sdp, int silent) sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE); goto out; } - sb_set_blocksize(sb, sdp->sd_sb.sb_bsize); + ret = -EINVAL; + if (!sb_set_blocksize(sb, sdp->sd_sb.sb_bsize)) + goto out; /* Get the root inode */ no_addr = sdp->sd_sb.sb_root_dir.no_addr; @@ -1155,6 +1119,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) int silent = fc->sb_flags & SB_SILENT; struct gfs2_sbd *sdp; struct gfs2_holder mount_gh; + struct address_space *mapping; int error; sdp = 
init_sbd(sb); @@ -1176,6 +1141,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_NOSEC; sb->s_magic = GFS2_MAGIC; sb->s_op = &gfs2_super_ops; + sb->s_d_op = &gfs2_dops; sb->s_export_op = &gfs2_export_ops; sb->s_qcop = &gfs2_quotactl_ops; @@ -1187,6 +1153,9 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) /* Set up the buffer cache and fill in some fake block size values to allow us to read-in the on-disk superblock. */ sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512); + error = -EINVAL; + if (!sdp->sd_sb.sb_bsize) + goto fail_free; sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift); @@ -1201,17 +1170,35 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) sdp->sd_tune.gt_statfs_quantum = 30; } + /* Set up an address space for metadata writes */ + sdp->sd_inode = new_inode(sb); + error = -ENOMEM; + if (!sdp->sd_inode) + goto fail_free; + sdp->sd_inode->i_ino = GFS2_BAD_INO; + sdp->sd_inode->i_size = OFFSET_MAX; + + mapping = gfs2_aspace(sdp); + mapping->a_ops = &gfs2_rgrp_aops; + mapping_set_gfp_mask(mapping, GFP_NOFS); + error = init_names(sdp, silent); if (error) - goto fail_free; + goto fail_iput; snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name); + error = -ENOMEM; + sdp->sd_glock_wq = alloc_workqueue("gfs2-glock/%s", + WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0, + sdp->sd_fsname); + if (!sdp->sd_glock_wq) + goto fail_iput; + sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s", WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname); - error = -ENOMEM; if (!sdp->sd_delete_wq) - goto fail_free; + goto fail_glock_wq; error = gfs2_sys_fs_add(sdp); if (error) @@ -1288,7 +1275,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) error = gfs2_make_fs_rw(sdp); if (error) { - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); gfs2_destroy_threads(sdp); fs_err(sdp, "can't make FS RW: %d\n", error); goto fail_per_node; @@ -1320,6 +1307,11 @@ fail_debug: gfs2_sys_fs_del(sdp); fail_delete_wq: destroy_workqueue(sdp->sd_delete_wq); +fail_glock_wq: + if (sdp->sd_glock_wq) + destroy_workqueue(sdp->sd_glock_wq); +fail_iput: + iput(sdp->sd_inode); fail_free: free_sbd(sdp); sb->s_fs_info = NULL; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index aa9cf0102848..2298e06797ac 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -75,9 +75,6 @@ #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT) #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1) -#define QC_CHANGE 0 -#define QC_SYNC 1 - /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */ /* -> sd_bitmap_lock */ static DEFINE_SPINLOCK(qd_lock); @@ -152,7 +149,7 @@ static void gfs2_qd_list_dispose(struct list_head *list) static enum lru_status gfs2_qd_isolate(struct list_head *item, - struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) + struct list_lru_one *lru, void *arg) { struct list_head *dispose = arg; struct gfs2_quota_data *qd = @@ -239,8 +236,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str return NULL; qd->qd_sbd = sdp; - qd->qd_lockref.count = 0; - spin_lock_init(&qd->qd_lockref.lock); + lockref_init(&qd->qd_lockref); qd->qd_id = qid; qd->qd_slot = -1; INIT_LIST_HEAD(&qd->qd_lru); @@ -301,7 +297,6 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, spin_lock_bucket(hash); *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid); if 
(qd == NULL) { - new_qd->qd_lockref.count++; *qdp = new_qd; list_add(&new_qd->qd_list, &sdp->sd_quota_list); hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]); @@ -319,11 +314,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid, } -static void qd_hold(struct gfs2_quota_data *qd) +static void __qd_hold(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; - gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref)); - lockref_get(&qd->qd_lockref); + gfs2_assert(sdp, qd->qd_lockref.count > 0); + qd->qd_lockref.count++; } static void qd_put(struct gfs2_quota_data *qd) @@ -400,16 +395,17 @@ static int bh_get(struct gfs2_quota_data *qd) struct inode *inode = sdp->sd_qc_inode; struct gfs2_inode *ip = GFS2_I(inode); unsigned int block, offset; - struct buffer_head *bh; + struct buffer_head *bh = NULL; struct iomap iomap = { }; int error; - mutex_lock(&sdp->sd_quota_mutex); - - if (qd->qd_bh_count++) { - mutex_unlock(&sdp->sd_quota_mutex); + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_bh_count) { + qd->qd_bh_count++; + spin_unlock(&qd->qd_lockref.lock); return 0; } + spin_unlock(&qd->qd_lockref.lock); block = qd->qd_slot / sdp->sd_qc_per_block; offset = qd->qd_slot % sdp->sd_qc_per_block; @@ -418,122 +414,83 @@ static int bh_get(struct gfs2_quota_data *qd) (loff_t)block << inode->i_blkbits, i_blocksize(inode), &iomap); if (error) - goto fail; + return error; error = -ENOENT; if (iomap.type != IOMAP_MAPPED) - goto fail; + return error; error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits, DIO_WAIT, 0, &bh); if (error) - goto fail; + return error; error = -EIO; if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) - goto fail_brelse; - - qd->qd_bh = bh; - qd->qd_bh_qc = (struct gfs2_quota_change *) - (bh->b_data + sizeof(struct gfs2_meta_header) + - offset * sizeof(struct gfs2_quota_change)); - - mutex_unlock(&sdp->sd_quota_mutex); + goto out; - return 0; + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_bh == NULL) { + qd->qd_bh = bh; + qd->qd_bh_qc = (struct gfs2_quota_change *) + (bh->b_data + sizeof(struct gfs2_meta_header) + + offset * sizeof(struct gfs2_quota_change)); + bh = NULL; + } + qd->qd_bh_count++; + spin_unlock(&qd->qd_lockref.lock); + error = 0; -fail_brelse: +out: brelse(bh); -fail: - qd->qd_bh_count--; - mutex_unlock(&sdp->sd_quota_mutex); return error; } static void bh_put(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; + struct buffer_head *bh = NULL; - mutex_lock(&sdp->sd_quota_mutex); + spin_lock(&qd->qd_lockref.lock); gfs2_assert(sdp, qd->qd_bh_count); if (!--qd->qd_bh_count) { - brelse(qd->qd_bh); + bh = qd->qd_bh; qd->qd_bh = NULL; qd->qd_bh_qc = NULL; } - mutex_unlock(&sdp->sd_quota_mutex); + spin_unlock(&qd->qd_lockref.lock); + brelse(bh); } -static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd, - u64 *sync_gen) +static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd, + u64 sync_gen) { + bool ret = false; + + spin_lock(&qd->qd_lockref.lock); if (test_bit(QDF_LOCKED, &qd->qd_flags) || !test_bit(QDF_CHANGE, &qd->qd_flags) || - (sync_gen && (qd->qd_sync_gen >= *sync_gen))) - return 0; - - /* - * If qd_change is 0 it means a pending quota change was negated. - * We should not sync it, but we still have a qd reference and slot - * reference taken by gfs2_quota_change -> do_qc that need to be put. 
- */ - if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) { - slot_put(qd); - qd_put(qd); - return 0; - } + qd->qd_sync_gen >= sync_gen) + goto out; - if (!lockref_get_not_dead(&qd->qd_lockref)) - return 0; + if (__lockref_is_dead(&qd->qd_lockref)) + goto out; + qd->qd_lockref.count++; list_move_tail(&qd->qd_list, &sdp->sd_quota_list); set_bit(QDF_LOCKED, &qd->qd_flags); qd->qd_change_sync = qd->qd_change; slot_hold(qd); - return 1; + ret = true; + +out: + spin_unlock(&qd->qd_lockref.lock); + return ret; } -static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) +static void qd_ungrab_sync(struct gfs2_quota_data *qd) { - int error; - - error = bh_get(qd); - if (!error) - return 0; - clear_bit(QDF_LOCKED, &qd->qd_flags); slot_put(qd); qd_put(qd); - return error; -} - -static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp) -{ - struct gfs2_quota_data *qd = NULL, *iter; - int error; - - *qdp = NULL; - - if (sb_rdonly(sdp->sd_vfs)) - return 0; - - spin_lock(&qd_lock); - - list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) { - if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) { - qd = iter; - break; - } - } - - spin_unlock(&qd_lock); - - if (qd) { - error = qd_bh_get_or_undo(sdp, qd); - if (error) - return error; - *qdp = qd; - } - - return 0; } static void qdsb_put(struct gfs2_quota_data *qd) @@ -545,8 +502,10 @@ static void qdsb_put(struct gfs2_quota_data *qd) static void qd_unlock(struct gfs2_quota_data *qd) { + spin_lock(&qd->qd_lockref.lock); gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags)); clear_bit(QDF_LOCKED, &qd->qd_flags); + spin_unlock(&qd->qd_lockref.lock); qdsb_put(qd); } @@ -710,48 +669,57 @@ static int sort_qd(const void *a, const void *b) return 0; } -static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type) +static void do_qc(struct gfs2_quota_data *qd, s64 change) { struct gfs2_sbd *sdp = qd->qd_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); struct gfs2_quota_change *qc = qd->qd_bh_qc; + bool needs_put = false; s64 x; - mutex_lock(&sdp->sd_quota_mutex); gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); - if (!test_bit(QDF_CHANGE, &qd->qd_flags)) { - qc->qc_change = 0; + /* + * The QDF_CHANGE flag indicates that the slot in the quota change file + * is used. Here, we use the value of qc->qc_change when the slot is + * used, and we assume a value of 0 otherwise. + */ + + spin_lock(&qd->qd_lockref.lock); + + x = 0; + if (test_bit(QDF_CHANGE, &qd->qd_flags)) + x = be64_to_cpu(qc->qc_change); + x += change; + qd->qd_change += change; + + if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) { + /* The slot in the quota change file becomes unused. */ + clear_bit(QDF_CHANGE, &qd->qd_flags); + qc->qc_flags = 0; + qc->qc_id = 0; + needs_put = true; + } else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) { + /* The slot in the quota change file becomes used. 
*/ + set_bit(QDF_CHANGE, &qd->qd_flags); + __qd_hold(qd); + slot_hold(qd); + qc->qc_flags = 0; if (qd->qd_id.type == USRQUOTA) qc->qc_flags = cpu_to_be32(GFS2_QCF_USER); qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id)); } - - x = be64_to_cpu(qc->qc_change) + change; qc->qc_change = cpu_to_be64(x); - spin_lock(&qd_lock); - qd->qd_change = x; - spin_unlock(&qd_lock); + spin_unlock(&qd->qd_lockref.lock); - if (qc_type == QC_CHANGE) { - if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) { - qd_hold(qd); - slot_hold(qd); - } - } else { - gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags)); - clear_bit(QDF_CHANGE, &qd->qd_flags); - qc->qc_flags = 0; - qc->qc_id = 0; + if (needs_put) { slot_put(qd); qd_put(qd); } - if (change < 0) /* Reset quiet flag if we freed some blocks */ clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); - mutex_unlock(&sdp->sd_quota_mutex); } static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index, @@ -890,6 +858,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, be64_add_cpu(&q.qu_value, change); if (((s64)be64_to_cpu(q.qu_value)) < 0) q.qu_value = 0; /* Never go negative on quota usage */ + spin_lock(&qd->qd_lockref.lock); qd->qd_qb.qb_value = q.qu_value; if (fdq) { if (fdq->d_fieldmask & QC_SPC_SOFT) { @@ -905,6 +874,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, qd->qd_qb.qb_value = q.qu_value; } } + spin_unlock(&qd->qd_lockref.lock); err = gfs2_write_disk_quota(sdp, &q, loc); if (!err) { @@ -919,7 +889,8 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc, return err; } -static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) +static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda, + u64 sync_gen) { struct gfs2_sbd *sdp = (*qda)->qd_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); @@ -992,7 +963,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) if (error) goto out_end_trans; - do_qc(qd, -qd->qd_change_sync, QC_SYNC); + do_qc(qd, -qd->qd_change_sync); set_bit(QDF_REFRESH, &qd->qd_flags); } @@ -1010,8 +981,13 @@ out_dq: gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC); if (!error) { - for (x = 0; x < num_qd; x++) - qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen; + for (x = 0; x < num_qd; x++) { + qd = qda[x]; + spin_lock(&qd->qd_lockref.lock); + if (qd->qd_sync_gen < sync_gen) + qd->qd_sync_gen = sync_gen; + spin_unlock(&qd->qd_lockref.lock); + } } return error; } @@ -1036,7 +1012,9 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd) qlvb->qb_limit = q.qu_limit; qlvb->qb_warn = q.qu_warn; qlvb->qb_value = q.qu_value; + spin_lock(&qd->qd_lockref.lock); qd->qd_qb = *qlvb; + spin_unlock(&qd->qd_lockref.lock); return 0; } @@ -1058,7 +1036,9 @@ restart: if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags)) force_refresh = FORCE; + spin_lock(&qd->qd_lockref.lock); qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr; + spin_unlock(&qd->qd_lockref.lock); if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { gfs2_glock_dq_uninit(q_gh); @@ -1129,35 +1109,36 @@ static bool need_sync(struct gfs2_quota_data *qd) { struct gfs2_sbd *sdp = qd->qd_sbd; struct gfs2_tune *gt = &sdp->sd_tune; - s64 value; + s64 value, change, limit; unsigned int num, den; + int ret = false; + spin_lock(&qd->qd_lockref.lock); if (!qd->qd_qb.qb_limit) - return false; + goto out; - spin_lock(&qd_lock); - value = qd->qd_change; - spin_unlock(&qd_lock); + change = 
qd->qd_change; + if (change <= 0) + goto out; + value = (s64)be64_to_cpu(qd->qd_qb.qb_value); + limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); + if (value >= limit) + goto out; spin_lock(&gt->gt_spin); num = gt->gt_quota_scale_num; den = gt->gt_quota_scale_den; spin_unlock(&gt->gt_spin); - if (value <= 0) - return false; - else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= - (s64)be64_to_cpu(qd->qd_qb.qb_limit)) - return false; - else { - value *= gfs2_jindex_size(sdp) * num; - value = div_s64(value, den); - value += (s64)be64_to_cpu(qd->qd_qb.qb_value); - if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) - return false; - } + change *= gfs2_jindex_size(sdp) * num; + change = div_s64(change, den); + if (value + change < limit) + goto out; - return true; + ret = true; +out: + spin_unlock(&qd->qd_lockref.lock); + return ret; } void gfs2_quota_unlock(struct gfs2_inode *ip) @@ -1166,7 +1147,6 @@ void gfs2_quota_unlock(struct gfs2_inode *ip) struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS]; unsigned int count = 0; u32 x; - int found; if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) return; @@ -1174,6 +1154,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip) for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { struct gfs2_quota_data *qd; bool sync; + int error; qd = ip->i_qadata->qa_qd[x]; sync = need_sync(qd); @@ -1183,18 +1164,26 @@ void gfs2_quota_unlock(struct gfs2_inode *ip) continue; spin_lock(&qd_lock); - found = qd_check_sync(sdp, qd, NULL); + sync = qd_grab_sync(sdp, qd, U64_MAX); spin_unlock(&qd_lock); - if (!found) + if (!sync) continue; - if (!qd_bh_get_or_undo(sdp, qd)) - qda[count++] = qd; + gfs2_assert_warn(sdp, qd->qd_change_sync); + error = bh_get(qd); + if (error) { + qd_ungrab_sync(qd); + continue; + } + + qda[count++] = qd; } if (count) { - do_sync(count, qda); + u64 sync_gen = READ_ONCE(sdp->sd_quota_sync_gen); + + do_sync(count, qda, sync_gen); for (x = 0; x < count; x++) qd_unlock(qda[x]); } @@ -1253,12 +1242,12 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, qid_eq(qd->qd_id, make_kqid_gid(gid)))) continue; + spin_lock(&qd->qd_lockref.lock); warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn); limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit); value = (s64)be64_to_cpu(qd->qd_qb.qb_value); - spin_lock(&qd_lock); value += qd->qd_change; - spin_unlock(&qd_lock); + spin_unlock(&qd->qd_lockref.lock); if (limit > 0 && (limit - value) < ap->allowed) ap->allowed = limit - value; @@ -1312,39 +1301,20 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, if (qid_eq(qd->qd_id, make_kqid_uid(uid)) || qid_eq(qd->qd_id, make_kqid_gid(gid))) { - do_qc(qd, change, QC_CHANGE); + do_qc(qd, change); } } } -static bool qd_changed(struct gfs2_sbd *sdp) -{ - struct gfs2_quota_data *qd; - bool changed = false; - - spin_lock(&qd_lock); - list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) { - if (test_bit(QDF_LOCKED, &qd->qd_flags) || - !test_bit(QDF_CHANGE, &qd->qd_flags)) - continue; - - changed = true; - break; - } - spin_unlock(&qd_lock); - return changed; -} - int gfs2_quota_sync(struct super_block *sb, int type) { struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_quota_data **qda; unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder); - unsigned int num_qd; - unsigned int x; + u64 sync_gen; int error = 0; - if (!qd_changed(sdp)) + if (sb_rdonly(sdp->sd_vfs)) return 0; qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL); @@ -1352,27 +1322,44 @@ int gfs2_quota_sync(struct super_block *sb, int type) return -ENOMEM; mutex_lock(&sdp->sd_quota_sync_mutex); -
sdp->sd_quota_sync_gen++; + sync_gen = sdp->sd_quota_sync_gen + 1; do { - num_qd = 0; + struct gfs2_quota_data *iter; + unsigned int num_qd = 0; + unsigned int x; - for (;;) { - error = qd_fish(sdp, qda + num_qd); - if (error || !qda[num_qd]) - break; - if (++num_qd == max_qd) - break; + spin_lock(&qd_lock); + list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) { + if (qd_grab_sync(sdp, iter, sync_gen)) { + qda[num_qd++] = iter; + if (num_qd == max_qd) + break; + } } + spin_unlock(&qd_lock); - if (num_qd) { + if (!num_qd) + break; + + for (x = 0; x < num_qd; x++) { + error = bh_get(qda[x]); if (!error) - error = do_sync(num_qd, qda); + continue; + + while (x < num_qd) + qd_ungrab_sync(qda[--num_qd]); + break; + } - for (x = 0; x < num_qd; x++) - qd_unlock(qda[x]); + if (!error) { + WRITE_ONCE(sdp->sd_quota_sync_gen, sync_gen); + error = do_sync(num_qd, qda, sync_gen); } - } while (!error && num_qd == max_qd); + + for (x = 0; x < num_qd; x++) + qd_unlock(qda[x]); + } while (!error); mutex_unlock(&sdp->sd_quota_sync_mutex); kfree(qda); @@ -1407,6 +1394,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) unsigned int found = 0; unsigned int hash; unsigned int bm_size; + struct buffer_head *bh; u64 dblock; u32 extlen = 0; int error; @@ -1426,8 +1414,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) return error; for (x = 0; x < blocks; x++) { - struct buffer_head *bh; - const struct gfs2_quota_change *qc; + struct gfs2_quota_change *qc; unsigned int y; if (!extlen) { @@ -1440,15 +1427,13 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); if (!bh) goto fail; - if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) { - brelse(bh); - goto fail; - } + if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) + goto fail_brelse; - qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); + qc = (struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header)); for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; y++, slot++) { - struct gfs2_quota_data *qd; + struct gfs2_quota_data *old_qd, *qd; s64 qc_change = be64_to_cpu(qc->qc_change); u32 qc_flags = be32_to_cpu(qc->qc_flags); enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ? 
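/*
 * Background sketch, not part of the patch: each journal's quota_change
 * file is an array of fixed-size gfs2_quota_change records, with
 * sdp->sd_qc_per_block of them per block after the gfs2_meta_header.
 * The slot arithmetic used by bh_get() and by this init scan reduces to
 * the following hypothetical helper:
 */
static void qc_slot_location(unsigned int slot, unsigned int qc_per_block,
			     unsigned int *block, unsigned int *offset)
{
	*block = slot / qc_per_block;	/* block within the quota_change file */
	*offset = slot % qc_per_block;	/* record index within that block */
}
/*
 * The record itself then lives at bh->b_data +
 * sizeof(struct gfs2_meta_header) + offset * sizeof(struct gfs2_quota_change).
 */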
@@ -1461,29 +1446,51 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) hash = gfs2_qd_hash(sdp, qc_id); qd = qd_alloc(hash, sdp, qc_id); - if (qd == NULL) { - brelse(bh); - goto fail; - } + if (qd == NULL) + goto fail_brelse; + qd->qd_lockref.count = 0; set_bit(QDF_CHANGE, &qd->qd_flags); qd->qd_change = qc_change; qd->qd_slot = slot; qd->qd_slot_ref = 1; spin_lock(&qd_lock); + spin_lock_bucket(hash); + old_qd = gfs2_qd_search_bucket(hash, sdp, qc_id); + if (old_qd) { + fs_err(sdp, "Corruption found in quota_change%u " + "file: duplicate identifier in " + "slot %u\n", + sdp->sd_jdesc->jd_jid, slot); + + spin_unlock_bucket(hash); + spin_unlock(&qd_lock); + qd_put(old_qd); + + gfs2_glock_put(qd->qd_gl); + kmem_cache_free(gfs2_quotad_cachep, qd); + + /* zero out the duplicate slot */ + lock_buffer(bh); + memset(qc, 0, sizeof(*qc)); + mark_buffer_dirty(bh); + unlock_buffer(bh); + + continue; + } BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap)); list_add(&qd->qd_list, &sdp->sd_quota_list); atomic_inc(&sdp->sd_quota_count); - spin_unlock(&qd_lock); - - spin_lock_bucket(hash); hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]); spin_unlock_bucket(hash); + spin_unlock(&qd_lock); found++; } + if (buffer_dirty(bh)) + sync_dirty_buffer(bh); brelse(bh); dblock++; extlen--; @@ -1494,6 +1501,10 @@ int gfs2_quota_init(struct gfs2_sbd *sdp) return 0; +fail_brelse: + if (buffer_dirty(bh)) + sync_dirty_buffer(bh); + brelse(bh); fail: gfs2_quota_cleanup(sdp); return error; diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h index f462d9cb3087..988f38dc5b2c 100644 --- a/fs/gfs2/quota.h +++ b/fs/gfs2/quota.h @@ -44,8 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip, int ret; ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */ - if (capable(CAP_SYS_RESOURCE) || - sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) + if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF || + capable(CAP_SYS_RESOURCE)) return 0; ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); if (ret) diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index f4fe7039f725..24250478b085 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -118,6 +118,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd) int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, unsigned int blkno, struct gfs2_log_header_host *head) { + const u32 zero = 0; u32 hash, crc; if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) || @@ -126,7 +127,7 @@ int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, return 1; hash = crc32(~0, lh, LH_V1_SIZE - 4); - hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */ + hash = ~crc32(hash, &zero, 4); /* assume lh_hash is zero */ if (be32_to_cpu(lh->lh_hash) != hash) return 1; @@ -263,16 +264,12 @@ static void clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - u32 lblock = head->lh_blkno; - gfs2_replay_incr_blk(jd, &lblock); - gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock, + gfs2_replay_incr_blk(jd, &head->lh_blkno); + head->lh_sequence++; + gfs2_write_log_header(sdp, jd, head->lh_sequence, 0, head->lh_blkno, GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY, REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC); - if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) { - sdp->sd_log_flush_head = lblock; - gfs2_log_incr_head(sdp); - } } @@ -457,7 +454,7 @@ void gfs2_recover_func(struct work_struct *work) if (error) goto fail_gunlock_ji; - error = gfs2_find_jhead(jd, &head,
true); + error = gfs2_find_jhead(jd, &head); if (error) goto fail_gunlock_ji; t_jhd = ktime_get(); @@ -533,6 +530,9 @@ void gfs2_recover_func(struct work_struct *work) ktime_ms_delta(t_rep, t_tlck)); } + if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) + gfs2_log_pointers_init(sdp, &head); + gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS); if (jlocked) { @@ -580,3 +580,13 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait) return wait ? jd->jd_recover_error : 0; } +void gfs2_log_pointers_init(struct gfs2_sbd *sdp, + struct gfs2_log_header_host *head) +{ + sdp->sd_log_sequence = head->lh_sequence + 1; + gfs2_replay_incr_blk(sdp->sd_jdesc, &head->lh_blkno); + sdp->sd_log_tail = head->lh_blkno; + sdp->sd_log_flush_head = head->lh_blkno; + sdp->sd_log_flush_tail = head->lh_blkno; + sdp->sd_log_head = head->lh_blkno; +} diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 6a0fd42e1120..5a5ba72ecd75 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h @@ -29,6 +29,8 @@ void gfs2_recover_func(struct work_struct *work); int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh, unsigned int blkno, struct gfs2_log_header_host *head); +void gfs2_log_pointers_init(struct gfs2_sbd *sdp, + struct gfs2_log_header_host *head); #endif /* __RECOVERY_DOT_H__ */ diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 26d6c1eea559..b14e54b38ee8 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -814,11 +814,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) bi = rgd->rd_bits + (length - 1); if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) { gfs2_lm(sdp, - "ri_addr = %llu\n" - "ri_length = %u\n" - "ri_data0 = %llu\n" - "ri_data = %u\n" - "ri_bitbytes = %u\n" + "ri_addr=%llu " + "ri_length=%u " + "ri_data0=%llu " + "ri_data=%u " + "ri_bitbytes=%u " "start=%u len=%u offset=%u\n", (unsigned long long)rgd->rd_addr, rgd->rd_length, @@ -1879,7 +1879,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip */ ip = gl->gl_object; - if (ip || !gfs2_queue_try_to_evict(gl)) + if (ip || !gfs2_queue_verify_delete(gl, false)) gfs2_glock_put(gl); else found++; @@ -1987,10 +1987,8 @@ static bool gfs2_rgrp_used_recently(const struct gfs2_blkreserv *rs, static u32 gfs2_orlov_skip(const struct gfs2_inode *ip) { const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - u32 skip; - get_random_bytes(&skip, sizeof(skip)); - return skip % sdp->sd_rgrps; + return get_random_u32() % sdp->sd_rgrps; } static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin) diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index e5f79466340d..7c518c4ff638 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -44,10 +44,10 @@ #include "xattr.h" #include "lops.h" -enum dinode_demise { - SHOULD_DELETE_DINODE, - SHOULD_NOT_DELETE_DINODE, - SHOULD_DEFER_EVICTION, +enum evict_behavior { + EVICT_SHOULD_DELETE, + EVICT_SHOULD_SKIP_DELETE, + EVICT_SHOULD_DEFER_DELETE, }; /** @@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp) sdp->sd_journals = 0; spin_unlock(&sdp->sd_jindex_spin); + down_write(&sdp->sd_log_flush_lock); sdp->sd_jdesc = NULL; + up_write(&sdp->sd_log_flush_lock); + while (!list_empty(&list)) { jd = list_first_entry(&list, struct gfs2_jdesc, jd_list); + BUG_ON(jd->jd_log_bio); gfs2_free_journal_extents(jd); list_del(&jd->jd_list); iput(jd->jd_inode); @@ -130,28 +134,18 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) { struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; - struct 
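In __get_log_header() above, crc32_le_shift() is replaced by folding four literal zero bytes into the running CRC, because lh_hash is defined to be zero while the header is checksummed. A sketch of that rule, where crc32_update() is an assumed stand-in for the kernel's crc32(), not a real library call:

#include <stddef.h>
#include <stdint.h>

uint32_t crc32_update(uint32_t crc, const void *buf, size_t len); /* assumed */

/* CRC over the first lh_v1_size bytes of the header, with the trailing
 * 4-byte lh_hash field replaced by zeroes. */
static uint32_t log_header_hash(const void *lh, size_t lh_v1_size)
{
	const uint32_t zero = 0;
	uint32_t crc = crc32_update(~0u, lh, lh_v1_size - 4);

	crc = crc32_update(crc, &zero, sizeof(zero));	/* lh_hash == 0 */
	return ~crc;
}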
gfs2_log_header_host head; int error; j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); if (gfs2_withdrawing_or_withdrawn(sdp)) return -EIO; - error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); - if (error) { - gfs2_consist(sdp); - return error; - } - - if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { - gfs2_consist(sdp); + if (sdp->sd_log_sequence == 0) { + fs_err(sdp, "unknown status of our own journal jid %d", + sdp->sd_lockstruct.ls_jid); return -EIO; } - /* Initialize some head of the log stuff */ - sdp->sd_log_sequence = head.lh_sequence + 1; - gfs2_log_pointers_init(sdp, head.lh_blkno); - error = gfs2_quota_init(sdp); if (!error && gfs2_withdrawing_or_withdrawn(sdp)) error = -EIO; @@ -354,7 +348,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) list_add(&lfcc->list, &list); } - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOPID, @@ -366,7 +360,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) error = gfs2_jdesc_check(jd); if (error) break; - error = gfs2_find_jhead(jd, &lh, false); + error = gfs2_find_jhead(jd, &lh); if (error) break; if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { @@ -378,7 +372,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) if (!error) goto out; /* success */ - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); relock_shared: error2 = gfs2_freeze_lock_shared(sdp); @@ -617,7 +611,7 @@ restart: /* Release stuff */ - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); iput(sdp->sd_jindex); iput(sdp->sd_statfs_inode); @@ -644,12 +638,9 @@ restart: gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ gfs2_gl_hash_clear(sdp); - truncate_inode_pages_final(&sdp->sd_aspace); + iput(sdp->sd_inode); gfs2_delete_debugfs_file(sdp); - /* Unmount the locking protocol */ - gfs2_lm_unmount(sdp); - /* At this point, we're through participating in the lockspace */ gfs2_sys_fs_del(sdp); free_sbd(sdp); } @@ -673,7 +664,7 @@ static int gfs2_sync_fs(struct super_block *sb, int wait) return sdp->sd_log_error; } -static int gfs2_do_thaw(struct gfs2_sbd *sdp) +static int gfs2_do_thaw(struct gfs2_sbd *sdp, enum freeze_holder who, const void *freeze_owner) { struct super_block *sb = sdp->sd_vfs; int error; @@ -681,7 +672,7 @@ static int gfs2_do_thaw(struct gfs2_sbd *sdp) error = gfs2_freeze_lock_shared(sdp); if (error) goto fail; - error = thaw_super(sb, FREEZE_HOLDER_USERSPACE); + error = thaw_super(sb, who, freeze_owner); if (!error) return 0; @@ -702,14 +693,14 @@ void gfs2_freeze_func(struct work_struct *work) if (test_bit(SDF_FROZEN, &sdp->sd_flags)) goto freeze_failed; - error = freeze_super(sb, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sb, FREEZE_HOLDER_USERSPACE, NULL); if (error) goto freeze_failed; - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); set_bit(SDF_FROZEN, &sdp->sd_flags); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL); if (error) goto out; @@ -727,10 +718,13 @@ out: /** * gfs2_freeze_super - prevent further writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: freeze flags + * @freeze_owner: owner of the freeze * */ -static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) +static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { struct gfs2_sbd *sdp = sb->s_fs_info; int error; @@ -743,7 +737,7 @@ static int 
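gfs2_do_thaw() above now takes the freeze_holder flags and the opaque freeze_owner cookie from its caller instead of hard-coding FREEZE_HOLDER_USERSPACE, so a thaw is issued with the same identity that froze the filesystem. At the call sites, as shown in the hunks, the change looks like:

/* before: the owner was implicit */
error = gfs2_do_thaw(sdp);

/* after: holder flags and owner travel with the request */
error = gfs2_do_thaw(sdp, FREEZE_HOLDER_USERSPACE, NULL);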
gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) } for (;;) { - error = freeze_super(sb, FREEZE_HOLDER_USERSPACE); + error = freeze_super(sb, who, freeze_owner); if (error) { fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", error); @@ -757,7 +751,7 @@ static int gfs2_freeze_super(struct super_block *sb, enum freeze_holder who) break; } - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who, freeze_owner); if (error) goto out; @@ -795,10 +789,13 @@ static int gfs2_freeze_fs(struct super_block *sb) /** * gfs2_thaw_super - reallow writes to the filesystem * @sb: the VFS structure for the filesystem + * @who: freeze flags + * @freeze_owner: owner of the freeze * */ -static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) +static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who, + const void *freeze_owner) { struct gfs2_sbd *sdp = sb->s_fs_info; int error; @@ -811,9 +808,9 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who) } atomic_inc(&sb->s_active); - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); - error = gfs2_do_thaw(sdp); + error = gfs2_do_thaw(sdp, who, freeze_owner); if (!error) { clear_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags); @@ -832,7 +829,7 @@ void gfs2_thaw_freeze_initiator(struct super_block *sb) if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags)) goto out; - gfs2_freeze_unlock(&sdp->sd_freeze_gh); + gfs2_freeze_unlock(sdp); out: mutex_unlock(&sdp->sd_freeze_mutex); @@ -1029,7 +1026,7 @@ static int gfs2_drop_inode(struct inode *inode) if (inode->i_nlink && gfs2_holder_initialized(&ip->i_iopen_gh)) { struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; - if (test_bit(GLF_DEMOTE, &gl->gl_flags)) + if (glock_needs_demote(gl)) clear_nlink(inode); } @@ -1044,8 +1041,8 @@ static int gfs2_drop_inode(struct inode *inode) struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; gfs2_glock_hold(gl); - if (!gfs2_queue_try_to_evict(gl)) - gfs2_glock_queue_put(gl); + if (!gfs2_queue_verify_delete(gl, true)) + gfs2_glock_put_async(gl); return 0; } @@ -1172,74 +1169,6 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) return 0; } -static void gfs2_final_release_pages(struct gfs2_inode *ip) -{ - struct inode *inode = &ip->i_inode; - struct gfs2_glock *gl = ip->i_gl; - - if (unlikely(!gl)) { - /* This can only happen during incomplete inode creation. 
*/ - BUG_ON(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)); - return; - } - - truncate_inode_pages(gfs2_glock2aspace(gl), 0); - truncate_inode_pages(&inode->i_data, 0); - - if (atomic_read(&gl->gl_revokes) == 0) { - clear_bit(GLF_LFLUSH, &gl->gl_flags); - clear_bit(GLF_DIRTY, &gl->gl_flags); - } -} - -static int gfs2_dinode_dealloc(struct gfs2_inode *ip) -{ - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_rgrpd *rgd; - struct gfs2_holder gh; - int error; - - if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { - gfs2_consist_inode(ip); - return -EIO; - } - - gfs2_rindex_update(sdp); - - error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); - if (error) - return error; - - rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1); - if (!rgd) { - gfs2_consist_inode(ip); - error = -EIO; - goto out_qs; - } - - error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, - LM_FLAG_NODE_SCOPE, &gh); - if (error) - goto out_qs; - - error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, - sdp->sd_jdesc->jd_blocks); - if (error) - goto out_rg_gunlock; - - gfs2_free_di(rgd, ip); - - gfs2_final_release_pages(ip); - - gfs2_trans_end(sdp); - -out_rg_gunlock: - gfs2_glock_dq_uninit(&gh); -out_qs: - gfs2_quota_unhold(ip); - return error; -} - /** * gfs2_glock_put_eventually * @gl: The glock to put @@ -1251,17 +1180,16 @@ out_qs: static void gfs2_glock_put_eventually(struct gfs2_glock *gl) { if (current->flags & PF_MEMALLOC) - gfs2_glock_queue_put(gl); + gfs2_glock_put_async(gl); else gfs2_glock_put(gl); } -static bool gfs2_upgrade_iopen_glock(struct inode *inode) +static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode) { struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_holder *gh = &ip->i_iopen_gh; - long timeout = 5 * HZ; int error; gh->gh_flags |= GL_NOCACHE; @@ -1272,9 +1200,9 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode) * exclusive access to the iopen glock here. * * Otherwise, the other nodes holding the lock will be notified about - * our locking request. If they do not have the inode open, they are - * expected to evict the cached inode and release the lock, allowing us - * to proceed. + * our locking request (see iopen_go_callback()). If they do not have + * the inode open, they are expected to evict the cached inode and + * release the lock, allowing us to proceed. * * Otherwise, if they cannot evict the inode, they are expected to poke * the inode glock (note: not the iopen glock). 
We will notice that @@ -1290,17 +1218,22 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode) gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh); error = gfs2_glock_nq(gh); if (error) - return false; + return EVICT_SHOULD_SKIP_DELETE; - timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait, + wait_event_interruptible_timeout(sdp->sd_async_glock_wait, !test_bit(HIF_WAIT, &gh->gh_iflags) || - test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags), - timeout); + glock_needs_demote(ip->i_gl), + 5 * HZ); if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) { gfs2_glock_dq(gh); - return false; + if (glock_needs_demote(ip->i_gl)) + return EVICT_SHOULD_SKIP_DELETE; + return EVICT_SHOULD_DEFER_DELETE; } - return gfs2_glock_holder_ready(gh) == 0; + error = gfs2_glock_holder_ready(gh); + if (error) + return EVICT_SHOULD_SKIP_DELETE; + return EVICT_SHOULD_DELETE; } /** @@ -1313,58 +1246,47 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode) * * Returns: the fate of the dinode */ -static enum dinode_demise evict_should_delete(struct inode *inode, - struct gfs2_holder *gh) +static enum evict_behavior evict_should_delete(struct inode *inode, + struct gfs2_holder *gh) { struct gfs2_inode *ip = GFS2_I(inode); struct super_block *sb = inode->i_sb; struct gfs2_sbd *sdp = sb->s_fs_info; int ret; - if (unlikely(test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) - goto should_delete; - - if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags)) - return SHOULD_DEFER_EVICTION; + if (gfs2_holder_initialized(&ip->i_iopen_gh) && + test_bit(GLF_DEFER_DELETE, &ip->i_iopen_gh.gh_gl->gl_flags)) + return EVICT_SHOULD_DEFER_DELETE; /* Deletes should never happen under memory pressure anymore. */ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC)) - return SHOULD_DEFER_EVICTION; + return EVICT_SHOULD_DEFER_DELETE; /* Must not read inode block until block type has been verified */ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh); - if (unlikely(ret)) { - glock_clear_object(ip->i_iopen_gh.gh_gl, ip); - ip->i_iopen_gh.gh_flags |= GL_NOCACHE; - gfs2_glock_dq_uninit(&ip->i_iopen_gh); - return SHOULD_DEFER_EVICTION; - } + if (unlikely(ret)) + return EVICT_SHOULD_SKIP_DELETE; if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino)) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); if (ret) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; ret = gfs2_instantiate(gh); if (ret) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; /* * The inode may have been recreated in the meantime. 
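gfs2_upgrade_iopen_glock() above now reports one of three outcomes rather than a bare bool, so the eviction path can tell "another node will handle the delete" apart from "try again later". A compact model of the mapping, under the simplifying assumption that only the lock result and a pending demote on the inode glock matter:

enum evict_behavior {			/* as introduced in super.c */
	EVICT_SHOULD_DELETE,
	EVICT_SHOULD_SKIP_DELETE,
	EVICT_SHOULD_DEFER_DELETE,
};

static enum evict_behavior upgrade_outcome(bool got_exclusive,
					   bool inode_glock_poked)
{
	if (got_exclusive)
		return EVICT_SHOULD_DELETE;	 /* we delete the dinode */
	if (inode_glock_poked)
		return EVICT_SHOULD_SKIP_DELETE; /* a remote holder will act */
	return EVICT_SHOULD_DEFER_DELETE;	 /* timed out; retry later */
}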
*/ if (inode->i_nlink) - return SHOULD_NOT_DELETE_DINODE; + return EVICT_SHOULD_SKIP_DELETE; -should_delete: if (gfs2_holder_initialized(&ip->i_iopen_gh) && - test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) { - if (!gfs2_upgrade_iopen_glock(inode)) { - gfs2_holder_uninit(&ip->i_iopen_gh); - return SHOULD_NOT_DELETE_DINODE; - } - } - return SHOULD_DELETE_DINODE; + test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) + return gfs2_upgrade_iopen_glock(inode); + return EVICT_SHOULD_DELETE; } /** @@ -1384,7 +1306,7 @@ static int evict_unlinked_inode(struct inode *inode) } if (ip->i_eattr) { - ret = gfs2_ea_dealloc(ip); + ret = gfs2_ea_dealloc(ip, true); if (ret) goto out; } @@ -1475,8 +1397,10 @@ static void gfs2_evict_inode(struct inode *inode) struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder gh; + enum evict_behavior behavior; int ret; + gfs2_holder_mark_uninitialized(&gh); if (inode->i_nlink || sb_rdonly(sb) || !ip->i_no_addr) goto out; @@ -1488,11 +1412,20 @@ static void gfs2_evict_inode(struct inode *inode) if (!sdp->sd_jdesc) goto out; - gfs2_holder_mark_uninitialized(&gh); - ret = evict_should_delete(inode, &gh); - if (ret == SHOULD_DEFER_EVICTION) - goto out; - if (ret == SHOULD_DELETE_DINODE) + behavior = evict_should_delete(inode, &gh); + if (behavior == EVICT_SHOULD_DEFER_DELETE && + !test_bit(SDF_KILL, &sdp->sd_flags)) { + struct gfs2_glock *io_gl = ip->i_iopen_gh.gh_gl; + + if (io_gl) { + gfs2_glock_hold(io_gl); + if (!gfs2_queue_verify_delete(io_gl, true)) + gfs2_glock_put(io_gl); + goto out; + } + behavior = EVICT_SHOULD_SKIP_DELETE; + } + if (behavior == EVICT_SHOULD_DELETE) ret = evict_unlinked_inode(inode); else ret = evict_linked_inode(inode); @@ -1500,11 +1433,11 @@ static void gfs2_evict_inode(struct inode *inode) if (gfs2_rs_active(&ip->i_res)) gfs2_rs_deltree(&ip->i_res); - if (gfs2_holder_initialized(&gh)) - gfs2_glock_dq_uninit(&gh); if (ret && ret != GLR_TRYFAILED && ret != -EROFS) fs_warn(sdp, "gfs2_evict_inode: %d\n", ret); out: + if (gfs2_holder_initialized(&gh)) + gfs2_glock_dq_uninit(&gh); truncate_inode_pages_final(&inode->i_data); if (ip->i_qadata) gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0); @@ -1524,7 +1457,6 @@ out: if (ip->i_gl) { glock_clear_object(ip->i_gl, ip); wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE); - gfs2_glock_add_to_lru(ip->i_gl); gfs2_glock_put_eventually(ip->i_gl); rcu_assign_pointer(ip->i_gl, NULL); } @@ -1538,11 +1470,13 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb) if (!ip) return NULL; ip->i_no_addr = 0; + ip->i_no_formal_ino = 0; ip->i_flags = 0; ip->i_gl = NULL; gfs2_holder_mark_uninitialized(&ip->i_iopen_gh); memset(&ip->i_res, 0, sizeof(ip->i_res)); RB_CLEAR_NODE(&ip->i_res.rs_node); + ip->i_diskflags = 0; ip->i_rahead = 0; return &ip->i_inode; } diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c index 250f340cb44d..748125653d6c 100644 --- a/fs/gfs2/sys.c +++ b/fs/gfs2/sys.c @@ -88,7 +88,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf) "Withdraw In Prog: %d\n" "Remote Withdraw: %d\n" "Withdraw Recovery: %d\n" - "Deactivating: %d\n" + "Killing: %d\n" "sd_log_error: %d\n" "sd_log_flush_lock: %d\n" "sd_log_num_revoke: %u\n" @@ -174,10 +174,10 @@ static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) switch (n) { case 0: - error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE); + error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL); break; case 1: - error = freeze_super(sdp->sd_vfs, 
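The defer path added to gfs2_evict_inode() above hands the iopen glock to the delete-verification worker while holding an extra reference; if the work could not be queued, the reference is dropped on the spot. The ownership idiom, isolated from the hunk:

/* Pin the glock across the asynchronous hand-off.
 * gfs2_queue_verify_delete() keeps the reference only when it actually
 * queues work, so on failure (work already pending) we drop it here. */
gfs2_glock_hold(io_gl);
if (!gfs2_queue_verify_delete(io_gl, true))
	gfs2_glock_put(io_gl);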
FREEZE_HOLDER_USERSPACE); + error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE, NULL); break; default: return -EINVAL; @@ -336,7 +336,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len return -EINVAL; if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags)) fs_info(sdp, "demote interface used\n"); - rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl); + rv = gfs2_glock_get(sdp, glnum, glops, NO_CREATE, &gl); if (rv) return rv; gfs2_glock_cb(gl, glmode); diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h index a5deb9f86831..26036ffc3f33 100644 --- a/fs/gfs2/trace_gfs2.h +++ b/fs/gfs2/trace_gfs2.h @@ -53,12 +53,20 @@ {(1UL << GLF_DIRTY), "y" }, \ {(1UL << GLF_LFLUSH), "f" }, \ {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \ - {(1UL << GLF_REPLY_PENDING), "r" }, \ - {(1UL << GLF_INITIAL), "I" }, \ - {(1UL << GLF_FROZEN), "F" }, \ + {(1UL << GLF_PENDING_REPLY), "R" }, \ + {(1UL << GLF_HAVE_REPLY), "r" }, \ + {(1UL << GLF_INITIAL), "a" }, \ + {(1UL << GLF_HAVE_FROZEN_REPLY), "F" }, \ {(1UL << GLF_LRU), "L" }, \ {(1UL << GLF_OBJECT), "o" }, \ - {(1UL << GLF_BLOCKING), "b" }) + {(1UL << GLF_BLOCKING), "b" }, \ + {(1UL << GLF_UNLOCKED), "x" }, \ + {(1UL << GLF_INSTANTIATE_NEEDED), "n" }, \ + {(1UL << GLF_INSTANTIATE_IN_PROG), "N" }, \ + {(1UL << GLF_TRY_TO_EVICT), "e" }, \ + {(1UL << GLF_VERIFY_DELETE), "E" }, \ + {(1UL << GLF_DEFER_DELETE), "s" }, \ + {(1UL << GLF_CANCELING), "C" }) #ifndef NUMPTY #define NUMPTY diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 192213c7359a..075f7e9abe47 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -226,6 +226,27 @@ out: unlock_buffer(bh); } +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len) +{ + struct buffer_head *head = folio_buffers(folio); + unsigned int bsize = head->b_size; + struct buffer_head *bh; + size_t to = from + len; + size_t start, end; + + for (bh = head, start = 0; bh != head || !start; + bh = bh->b_this_page, start = end) { + end = start + bsize; + if (end <= from) + continue; + if (start >= to) + break; + set_buffer_uptodate(bh); + gfs2_trans_add_data(gl, bh); + } +} + void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) { @@ -246,12 +267,12 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) if (bd == NULL) { gfs2_log_unlock(sdp); unlock_buffer(bh); - lock_page(bh->b_page); + folio_lock(bh->b_folio); if (bh->b_private == NULL) bd = gfs2_alloc_bufdata(gl, bh); else bd = bh->b_private; - unlock_page(bh->b_page); + folio_unlock(bh->b_folio); lock_buffer(bh); gfs2_log_lock(sdp); } diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h index f8ce5302280d..790c55f59e61 100644 --- a/fs/gfs2/trans.h +++ b/fs/gfs2/trans.h @@ -42,6 +42,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, void gfs2_trans_end(struct gfs2_sbd *sdp); void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh); +void gfs2_trans_add_databufs(struct gfs2_glock *gl, struct folio *folio, + size_t from, size_t len); void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh); void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index f52141ce9485..d5a1e63fa257 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -73,7 +73,7 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, "mount.\n"); goto out_unlock; } - error = gfs2_find_jhead(jd, &head, 
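gfs2_trans_add_databufs(), now living in trans.c above, walks a folio's circular list of buffer heads and journals only those overlapping the byte range [from, from + len). The termination condition deserves a note: start doubles as a have-we-advanced flag, so the walk visits head once and stops when the list wraps back around. The same loop, annotated:

for (bh = head, start = 0; bh != head || !start;
     bh = bh->b_this_page, start = end) {
	end = start + bsize;
	if (end <= from)	/* buffer ends before the range */
		continue;
	if (start >= to)	/* buffer starts after the range */
		break;
	set_buffer_uptodate(bh);
	gfs2_trans_add_data(gl, bh);	/* journal the overlapping buffer */
}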
false); + error = gfs2_find_jhead(jd, &head); if (error) { if (verbose) fs_err(sdp, "Error parsing journal for spectator " @@ -99,20 +99,20 @@ out_unlock: */ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp) { + int flags = LM_FLAG_NOEXP | GL_EXACT; int error; - error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, - LM_FLAG_NOEXP | GL_EXACT, + error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags, &sdp->sd_freeze_gh); - if (error) + if (error && error != GLR_TRYFAILED) fs_err(sdp, "can't lock the freeze glock: %d\n", error); return error; } -void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh) +void gfs2_freeze_unlock(struct gfs2_sbd *sdp) { - if (gfs2_holder_initialized(freeze_gh)) - gfs2_glock_dq_uninit(freeze_gh); + if (gfs2_holder_initialized(&sdp->sd_freeze_gh)) + gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); } static void signal_our_withdraw(struct gfs2_sbd *sdp) @@ -206,9 +206,9 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) * on other nodes to be successful, otherwise we remain the owner of * the glock as far as dlm is concerned. */ - if (i_gl->gl_ops->go_free) { - set_bit(GLF_FREEING, &i_gl->gl_flags); - wait_on_bit(&i_gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE); + if (i_gl->gl_ops->go_unlocked) { + set_bit(GLF_UNLOCKED, &i_gl->gl_flags); + wait_on_bit(&i_gl->gl_flags, GLF_UNLOCKED, TASK_UNINTERRUPTIBLE); } /* @@ -255,7 +255,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) gfs2_glock_nq(&sdp->sd_live_gh); } - gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */ + gfs2_glock_put(live_gl); /* drop extra reference we acquired */ clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); /* @@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp) fs_err(sdp, "telling LM to unmount\n"); lm->lm_unmount(sdp); } - set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); fs_err(sdp, "File system withdrawn\n"); dump_stack(); clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags); @@ -376,8 +375,8 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion, return; fs_err(sdp, - "fatal: assertion \"%s\" failed\n" - " function = %s, file = %s, line = %u\n", + "fatal: assertion \"%s\" failed - " + "function = %s, file = %s, line = %u\n", assertion, function, file, line); /* @@ -407,7 +406,8 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, return; if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) - fs_warn(sdp, "warning: assertion \"%s\" failed at function = %s, file = %s, line = %u\n", + fs_warn(sdp, "warning: assertion \"%s\" failed - " + "function = %s, file = %s, line = %u\n", assertion, function, file, line); if (sdp->sd_args.ar_debug) @@ -416,10 +416,10 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, dump_stack(); if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC) - panic("GFS2: fsid=%s: warning: assertion \"%s\" failed\n" - "GFS2: fsid=%s: function = %s, file = %s, line = %u\n", + panic("GFS2: fsid=%s: warning: assertion \"%s\" failed - " + "function = %s, file = %s, line = %u\n", sdp->sd_fsname, assertion, - sdp->sd_fsname, function, file, line); + function, file, line); sdp->sd_last_warning = jiffies; } @@ -432,7 +432,8 @@ void gfs2_consist_i(struct gfs2_sbd *sdp, const char *function, char *file, unsigned int line) { gfs2_lm(sdp, - "fatal: filesystem consistency error - function = %s, file = %s, line = %u\n", + "fatal: filesystem consistency error - " + "function = %s, file = %s, line = %u\n", function, file, line); gfs2_withdraw(sdp); } @@ -447,9 +448,9 @@ void gfs2_consist_inode_i(struct gfs2_inode *ip, 
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); gfs2_lm(sdp, - "fatal: filesystem consistency error\n" - " inode = %llu %llu\n" - " function = %s, file = %s, line = %u\n", + "fatal: filesystem consistency error - " + "inode = %llu %llu, " + "function = %s, file = %s, line = %u\n", (unsigned long long)ip->i_no_formal_ino, (unsigned long long)ip->i_no_addr, function, file, line); @@ -470,9 +471,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); gfs2_rgrp_dump(NULL, rgd, fs_id_buf); gfs2_lm(sdp, - "fatal: filesystem consistency error\n" - " RG = %llu\n" - " function = %s, file = %s, line = %u\n", + "fatal: filesystem consistency error - " + "RG = %llu, " + "function = %s, file = %s, line = %u\n", (unsigned long long)rgd->rd_addr, function, file, line); gfs2_dump_glock(NULL, rgd->rd_gl, 1); @@ -486,16 +487,16 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, */ int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, - const char *type, const char *function, char *file, + const char *function, char *file, unsigned int line) { int me; gfs2_lm(sdp, - "fatal: invalid metadata block\n" - " bh = %llu (%s)\n" - " function = %s, file = %s, line = %u\n", - (unsigned long long)bh->b_blocknr, type, + "fatal: invalid metadata block - " + "bh = %llu (bad magic number), " + "function = %s, file = %s, line = %u\n", + (unsigned long long)bh->b_blocknr, function, file, line); me = gfs2_withdraw(sdp); return (me) ? -1 : -2; @@ -514,9 +515,9 @@ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, int me; gfs2_lm(sdp, - "fatal: invalid metadata block\n" - " bh = %llu (type: exp=%u, found=%u)\n" - " function = %s, file = %s, line = %u\n", + "fatal: invalid metadata block - " + "bh = %llu (type: exp=%u, found=%u), " + "function = %s, file = %s, line = %u\n", (unsigned long long)bh->b_blocknr, type, t, function, file, line); me = gfs2_withdraw(sdp); @@ -533,8 +534,8 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file, unsigned int line) { gfs2_lm(sdp, - "fatal: I/O error\n" - " function = %s, file = %s, line = %u\n", + "fatal: I/O error - " + "function = %s, file = %s, line = %u\n", function, file, line); return gfs2_withdraw(sdp); } @@ -551,9 +552,9 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh, if (gfs2_withdrawing_or_withdrawn(sdp)) return; - fs_err(sdp, "fatal: I/O error\n" - " block = %llu\n" - " function = %s, file = %s, line = %u\n", + fs_err(sdp, "fatal: I/O error - " + "block = %llu, " + "function = %s, file = %s, line = %u\n", (unsigned long long)bh->b_blocknr, function, file, line); if (withdraw) gfs2_withdraw(sdp); diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h index ba071998461f..27d03b641024 100644 --- a/fs/gfs2/util.h +++ b/fs/gfs2/util.h @@ -92,7 +92,7 @@ gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__) int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, - const char *type, const char *function, + const char *function, char *file, unsigned int line); static inline int gfs2_meta_check(struct gfs2_sbd *sdp, @@ -123,7 +123,7 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp, u32 magic = be32_to_cpu(mh->mh_magic); u16 t = be32_to_cpu(mh->mh_type); if (unlikely(magic != GFS2_MAGIC)) - return gfs2_meta_check_ii(sdp, bh, "magic number", function, + return gfs2_meta_check_ii(sdp, bh, function, file, line); if (unlikely(t != type)) return gfs2_metatype_check_ii(sdp, bh, type, t, function, @@ -150,7 +150,7 @@ int gfs2_io_error_i(struct 
gfs2_sbd *sdp, const char *function, int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, bool verbose); int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp); -void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh); +void gfs2_freeze_unlock(struct gfs2_sbd *sdp); #define gfs2_io_error(sdp) \ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__) diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 8c96ba6230d1..df9c93de94c7 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -96,30 +96,34 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, return -EIO; for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) { - if (!GFS2_EA_REC_LEN(ea)) - goto fail; + if (!GFS2_EA_REC_LEN(ea)) { + gfs2_consist_inode(ip); + return -EIO; + } if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= - bh->b_data + bh->b_size)) - goto fail; - if (!gfs2_eatype_valid(sdp, ea->ea_type)) - goto fail; + bh->b_data + bh->b_size)) { + gfs2_consist_inode(ip); + return -EIO; + } + if (!gfs2_eatype_valid(sdp, ea->ea_type)) { + gfs2_consist_inode(ip); + return -EIO; + } error = ea_call(ip, bh, ea, prev, data); if (error) return error; if (GFS2_EA_IS_LAST(ea)) { if ((char *)GFS2_EA2NEXT(ea) != - bh->b_data + bh->b_size) - goto fail; + bh->b_data + bh->b_size) { + gfs2_consist_inode(ip); + return -EIO; + } break; } } return error; - -fail: - gfs2_consist_inode(ip); - return -EIO; } static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) @@ -1379,7 +1383,7 @@ out: return error; } -static int ea_dealloc_block(struct gfs2_inode *ip) +static int ea_dealloc_block(struct gfs2_inode *ip, bool initialized) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_rgrpd *rgd; @@ -1412,7 +1416,7 @@ static int ea_dealloc_block(struct gfs2_inode *ip) ip->i_eattr = 0; gfs2_add_inode_blocks(&ip->i_inode, -1); - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = gfs2_meta_inode_buffer(ip, &dibh); if (!error) { gfs2_trans_add_meta(ip->i_gl, dibh); @@ -1431,11 +1435,12 @@ out_gunlock: /** * gfs2_ea_dealloc - deallocate the extended attribute fork * @ip: the inode + * @initialized: xattrs have been initialized * * Returns: errno */ -int gfs2_ea_dealloc(struct gfs2_inode *ip) +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized) { int error; @@ -1447,7 +1452,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) if (error) return error; - if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { + if (initialized) { error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); if (error) goto out_quota; @@ -1459,7 +1464,7 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) } } - error = ea_dealloc_block(ip); + error = ea_dealloc_block(ip, initialized); out_quota: gfs2_quota_unhold(ip); diff --git a/fs/gfs2/xattr.h b/fs/gfs2/xattr.h index eb12eb7e37c1..3c9788e0e137 100644 --- a/fs/gfs2/xattr.h +++ b/fs/gfs2/xattr.h @@ -54,7 +54,7 @@ int __gfs2_xattr_set(struct inode *inode, const char *name, const void *value, size_t size, int flags, int type); ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size); -int gfs2_ea_dealloc(struct gfs2_inode *ip); +int gfs2_ea_dealloc(struct gfs2_inode *ip, bool initialized); /* Exported to acl.c */
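Finally, the new initialized argument to gfs2_ea_dealloc() above replaces the GIF_ALLOC_FAILED test, making the caller's state explicit: the eviction path seen earlier passes true so unstuffed extended attributes are walked and freed, while a failed-creation caller (a hypothetical call site, not shown in this diff) would pass false to release only the xattr block itself. In caller terms:

/* normal eviction: xattrs were fully initialized */
ret = gfs2_ea_dealloc(ip, true);

/* aborted inode creation (hypothetical call site): skip the EA walk */
ret = gfs2_ea_dealloc(ip, false);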