Diffstat (limited to 'fs/gfs2/glops.c')
| -rw-r--r-- | fs/gfs2/glops.c | 612 |
1 files changed, 364 insertions, 248 deletions
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e5224c51c..2173ccf5034b 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */
 
 #include <linux/spinlock.h>
@@ -13,6 +10,8 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/bio.h>
 #include <linux/posix_acl.h>
+#include <linux/security.h>
+#include <linux/log2.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -27,29 +26,39 @@
 #include "util.h"
 #include "trans.h"
 #include "dir.h"
+#include "lops.h"
+
+struct workqueue_struct *gfs2_freeze_wq;
 
 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
-	fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n",
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	fs_err(sdp,
+	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
+	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
-	       bh->b_page->mapping, bh->b_page->flags);
-	fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n",
+	       bh->b_folio->mapping, bh->b_folio->flags.f);
+	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
-	gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n");
+	gfs2_lm(sdp, "AIL error\n");
+	gfs2_withdraw(sdp);
 }
 
 /**
  * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
  * @gl: the glock
  * @fsync: set when called from fsync (not all buffers will be clean)
+ * @nr_revokes: Number of buffers to revoke
  *
  * None of the buffers should be dirty, locked, or pinned.
  */
 
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+			     unsigned int nr_revokes)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct list_head *head = &gl->gl_ail_list;
 	struct gfs2_bufdata *bd, *tmp;
 	struct buffer_head *bh;
@@ -57,7 +66,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 
 	gfs2_log_lock(sdp);
 	spin_lock(&sdp->sd_ail_lock);
-	list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+		if (nr_revokes == 0)
+			break;
 		bh = bd->bd_bh;
 		if (bh->b_state & b_state) {
 			if (fsync)
@@ -65,6 +76,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 			gfs2_ail_error(gl, bh);
 		}
 		gfs2_trans_add_revoke(sdp, bd);
+		nr_revokes--;
 	}
 	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
 	spin_unlock(&sdp->sd_ail_lock);
@@ -72,34 +84,62 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 }
 
 
-static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
+static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_trans tr;
+	unsigned int revokes;
+	int ret = 0;
+
+	revokes = atomic_read(&gl->gl_ail_count);
+
+	if (!revokes) {
+		bool have_revokes;
+		bool log_in_flight;
+
+		/*
+		 * We have nothing on the ail, but there could be revokes on
+		 * the sdp revoke queue, in which case, we still want to flush
+		 * the log and wait for it to finish.
+		 *
+		 * If the sdp revoke list is empty too, we might still have an
+		 * io outstanding for writing revokes, so we should wait for
+		 * it before returning.
+		 *
+		 * If none of these conditions are true, our revokes are all
+		 * flushed and we can return.
+		 */
+		gfs2_log_lock(sdp);
+		have_revokes = !list_empty(&sdp->sd_log_revokes);
+		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
+		gfs2_log_unlock(sdp);
+		if (have_revokes)
+			goto flush;
+		if (log_in_flight)
+			log_flush_wait(sdp);
+		return 0;
+	}
 
 	memset(&tr, 0, sizeof(tr));
-	tr.tr_revokes = atomic_read(&gl->gl_ail_count);
-
-	if (!tr.tr_revokes)
-		return;
-
-	/* A shortened, inline version of gfs2_trans_begin() */
-	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
-	tr.tr_ip = (unsigned long)__builtin_return_address(0);
-	sb_start_intwrite(sdp->sd_vfs);
-	gfs2_log_reserve(sdp, tr.tr_reserved);
-	WARN_ON_ONCE(current->journal_info);
-	current->journal_info = &tr;
-
-	__gfs2_ail_flush(gl, 0);
-
+	set_bit(TR_ONSTACK, &tr.tr_flags);
+	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
+	if (ret) {
+		fs_err(sdp, "Transaction error %d: Unable to write revokes.", ret);
+		goto flush;
+	}
+	__gfs2_ail_flush(gl, 0, revokes);
 	gfs2_trans_end(sdp);
-	gfs2_log_flush(sdp, NULL);
+
+flush:
+	if (!ret)
+		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+			       GFS2_LFC_AIL_EMPTY_GL);
+	return ret;
 }
 
 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	unsigned int revokes = atomic_read(&gl->gl_ail_count);
 	int ret;
 
@@ -109,9 +149,35 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 	ret = gfs2_trans_begin(sdp, 0, revokes);
 	if (ret)
 		return;
-	__gfs2_ail_flush(gl, fsync);
+	__gfs2_ail_flush(gl, fsync, revokes);
 	gfs2_trans_end(sdp);
-	gfs2_log_flush(sdp, NULL);
+	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
+		       GFS2_LFC_AIL_FLUSH);
+}
+
+/**
+ * gfs2_rgrp_metasync - sync out the metadata of a resource group
+ * @gl: the glock protecting the resource group
+ *
+ */
+
+static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
+{
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct address_space *metamapping = gfs2_aspace(sdp);
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
+	int error;
+
+	filemap_fdatawrite_range(metamapping, start, end);
+	error = filemap_fdatawait_range(metamapping, start, end);
+	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
+	mapping_set_error(metamapping, error);
+	if (error)
+		gfs2_io_error(sdp);
+	return error;
 }
 
 /**
@@ -120,30 +186,26 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock. We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 
-static void rgrp_go_sync(struct gfs2_glock *gl)
+static int rgrp_go_sync(struct gfs2_glock *gl)
 {
-	struct address_space *metamapping = gfs2_glock2aspace(gl);
-	struct gfs2_rgrpd *rgd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	int error;
 
-	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+	if (!rgd || !test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
+		return 0;
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl);
-	filemap_fdatawrite(metamapping);
-	error = filemap_fdatawait(metamapping);
-	mapping_set_error(metamapping, error);
-	gfs2_ail_empty_gl(gl);
-
-	spin_lock(&gl->gl_spin);
-	rgd = gl->gl_object;
-	if (rgd)
-		gfs2_free_clones(rgd);
-	spin_unlock(&gl->gl_spin);
+	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+		       GFS2_LFC_RGRP_GO_SYNC);
+	error = gfs2_rgrp_metasync(gl);
+	if (!error)
+		error = gfs2_ail_empty_gl(gl);
+	gfs2_free_clones(rgd);
+	return error;
 }
 
 /**
@@ -158,64 +220,136 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
 
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct address_space *mapping = gfs2_glock2aspace(gl);
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct address_space *mapping = gfs2_aspace(sdp);
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
+	const unsigned bsize = sdp->sd_sb.sb_bsize;
+	loff_t start, end;
 
+	if (!rgd)
+		return;
+	start = (rgd->rd_addr * bsize) & PAGE_MASK;
+	end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
+	gfs2_rgrp_brelse(rgd);
 	WARN_ON_ONCE(!(flags & DIO_METADATA));
-	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
-	truncate_inode_pages(mapping, 0);
+	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
+	truncate_inode_pages_range(mapping, start, end);
+}
 
-	if (gl->gl_object) {
-		struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object;
-		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
-	}
+static void gfs2_rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
+			      const char *fs_id_buf)
+{
+	struct gfs2_rgrpd *rgd = gl->gl_object;
+
+	if (rgd)
+		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
+}
+
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip;
+
+	spin_lock(&gl->gl_lockref.lock);
+	ip = gl->gl_object;
+	if (ip)
+		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+	spin_unlock(&gl->gl_lockref.lock);
+	return ip;
+}
+
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+	struct gfs2_rgrpd *rgd;
+
+	spin_lock(&gl->gl_lockref.lock);
+	rgd = gl->gl_object;
+	spin_unlock(&gl->gl_lockref.lock);
+
+	return rgd;
+}
+
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+	if (!ip)
+		return;
+
+	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
 }
 
 /**
- * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
+ * gfs2_inode_metasync - sync out the metadata of an inode
  * @gl: the glock protecting the inode
  *
  */
-
-static void inode_go_sync(struct gfs2_glock *gl)
+int gfs2_inode_metasync(struct gfs2_glock *gl)
 {
-	struct gfs2_inode *ip = gl->gl_object;
 	struct address_space *metamapping = gfs2_glock2aspace(gl);
 	int error;
 
-	if (ip && !S_ISREG(ip->i_inode.i_mode))
-		ip = NULL;
-	if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
-		unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+	filemap_fdatawrite(metamapping);
+	error = filemap_fdatawait(metamapping);
+	if (error)
+		gfs2_io_error(gl->gl_name.ln_sbd);
+	return error;
+}
+
+/**
+ * inode_go_sync - Sync the dirty metadata of an inode
+ * @gl: the glock protecting the inode
+ *
+ */
+
+static int inode_go_sync(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
+	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
+	struct address_space *metamapping = gfs2_glock2aspace(gl);
+	int error = 0, ret;
+
+	if (isreg) {
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+		inode_dio_wait(&ip->i_inode);
+	}
 	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
-		return;
+		goto out;
 
 	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-	gfs2_log_flush(gl->gl_sbd, gl);
+	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+		       GFS2_LFC_INODE_GO_SYNC);
 	filemap_fdatawrite(metamapping);
-	if (ip) {
+	if (isreg) {
 		struct address_space *mapping = ip->i_inode.i_mapping;
 		filemap_fdatawrite(mapping);
 		error = filemap_fdatawait(mapping);
 		mapping_set_error(mapping, error);
 	}
-	error = filemap_fdatawait(metamapping);
-	mapping_set_error(metamapping, error);
-	gfs2_ail_empty_gl(gl);
+	ret = gfs2_inode_metasync(gl);
+	if (!error)
+		error = ret;
+	ret = gfs2_ail_empty_gl(gl);
+	if (!error)
+		error = ret;
 	/*
 	 * Writeback of the data mapping may cause the dirty flag to be set
 	 * so we have to clear it again here.
 	 */
-	smp_mb__before_clear_bit();
+	smp_mb__before_atomic();
 	clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+	gfs2_clear_glop_pending(ip);
+	return error;
 }
 
 /**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
- *
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
@@ -223,110 +357,78 @@ static void inode_go_sync(struct gfs2_glock *gl)
 
 static void inode_go_inval(struct gfs2_glock *gl, int flags)
 {
-	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
-	gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
+	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
 
 	if (flags & DIO_METADATA) {
 		struct address_space *mapping = gfs2_glock2aspace(gl);
 		truncate_inode_pages(mapping, 0);
 		if (ip) {
-			set_bit(GIF_INVALID, &ip->i_flags);
+			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
 			forget_all_cached_acls(&ip->i_inode);
+			security_inode_invalidate_secctx(&ip->i_inode);
 			gfs2_dir_hash_inval(ip);
 		}
 	}
 
-	if (ip == GFS2_I(gl->gl_sbd->sd_rindex)) {
-		gfs2_log_flush(gl->gl_sbd, NULL);
-		gl->gl_sbd->sd_rindex_uptodate = 0;
+	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
+		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
+			       GFS2_LOG_HEAD_FLUSH_NORMAL |
+			       GFS2_LFC_INODE_GO_INVAL);
+		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
 	}
 	if (ip && S_ISREG(ip->i_inode.i_mode))
 		truncate_inode_pages(ip->i_inode.i_mapping, 0);
-}
-
-/**
- * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int inode_go_demote_ok(const struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	struct gfs2_holder *gh;
-
-	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
-		return 0;
-
-	if (!list_empty(&gl->gl_holders)) {
-		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
-		if (gh->gh_list.next != &gl->gl_holders)
-			return 0;
-	}
-
-	return 1;
-}
-
-/**
- * gfs2_set_nlink - Set the inode's link count based on on-disk info
- * @inode: The inode in question
- * @nlink: The link count
- *
- * If the link count has hit zero, it must never be raised, whatever the
- * on-disk inode might say. When new struct inodes are created the link
- * count is set to 1, so that we can safely use this test even when reading
- * in on disk information for the first time.
- */
-
-static void gfs2_set_nlink(struct inode *inode, u32 nlink)
-{
-	/*
-	 * We will need to review setting the nlink count here in the
-	 * light of the forthcoming ro bind mount work. This is a reminder
-	 * to do that.
-	 */
-	if ((inode->i_nlink != nlink) && (inode->i_nlink != 0)) {
-		if (nlink == 0)
-			clear_nlink(inode);
-		else
-			set_nlink(inode, nlink);
-	}
+	gfs2_clear_glop_pending(ip);
 }
 
 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 {
+	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 	const struct gfs2_dinode *str = buf;
-	struct timespec atime;
+	struct timespec64 atime, iatime;
 	u16 height, depth;
+	umode_t mode = be32_to_cpu(str->di_mode);
+	struct inode *inode = &ip->i_inode;
+	bool is_new = inode_state_read_once(inode) & I_NEW;
 
-	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
-		goto corrupt;
+	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
+	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
-	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
-	ip->i_inode.i_rdev = 0;
-	switch (ip->i_inode.i_mode & S_IFMT) {
-	case S_IFBLK:
-	case S_IFCHR:
-		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
-					   be32_to_cpu(str->di_minor));
-		break;
-	};
-
-	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
-	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
-	gfs2_set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
-	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
-	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
+	inode->i_mode = mode;
+	if (is_new) {
+		inode->i_rdev = 0;
+		switch (mode & S_IFMT) {
+		case S_IFBLK:
+		case S_IFCHR:
+			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
+					      be32_to_cpu(str->di_minor));
+			break;
+		}
+	}
+
+	i_uid_write(inode, be32_to_cpu(str->di_uid));
+	i_gid_write(inode, be32_to_cpu(str->di_gid));
+	set_nlink(inode, be32_to_cpu(str->di_nlink));
+	i_size_write(inode, be64_to_cpu(str->di_size));
+	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
 	atime.tv_sec = be64_to_cpu(str->di_atime);
 	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
-	if (timespec_compare(&ip->i_inode.i_atime, &atime) < 0)
-		ip->i_inode.i_atime = atime;
-	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
-	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
-	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
-	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);
+	iatime = inode_get_atime(inode);
+	if (timespec64_compare(&iatime, &atime) < 0)
+		inode_set_atime_to_ts(inode, atime);
+	inode_set_mtime(inode, be64_to_cpu(str->di_mtime),
+			be32_to_cpu(str->di_mtime_nsec));
+	inode_set_ctime(inode, be64_to_cpu(str->di_ctime),
+			be32_to_cpu(str->di_ctime_nsec));
 
 	ip->i_goal = be64_to_cpu(str->di_goal_meta);
 	ip->i_generation = be64_to_cpu(str->di_generation);
@@ -334,25 +436,35 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 	ip->i_diskflags = be32_to_cpu(str->di_flags);
 	ip->i_eattr = be64_to_cpu(str->di_eattr);
 	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
-	gfs2_set_inode_flags(&ip->i_inode);
+	gfs2_set_inode_flags(inode);
 	height = be16_to_cpu(str->di_height);
-	if (unlikely(height > GFS2_MAX_META_HEIGHT))
-		goto corrupt;
+	if (unlikely(height > sdp->sd_max_height)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_height = (u8)height;
 
 	depth = be16_to_cpu(str->di_depth);
-	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
-		goto corrupt;
+	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
+	if ((ip->i_diskflags & GFS2_DIF_EXHASH) &&
+	    depth < ilog2(sdp->sd_hash_ptrs)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_depth = (u8)depth;
 	ip->i_entries = be32_to_cpu(str->di_entries);
 
-	if (S_ISREG(ip->i_inode.i_mode))
-		gfs2_set_aops(&ip->i_inode);
+	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
+	if (S_ISREG(inode->i_mode))
+		gfs2_set_aops(inode);
 
 	return 0;
-
-corrupt:
-	gfs2_consist_inode(ip);
-	return -EIO;
 }
 
 /**
@@ -362,7 +474,7 @@ corrupt:
  * Returns: errno
  */
 
-int gfs2_inode_refresh(struct gfs2_inode *ip)
+static int gfs2_inode_refresh(struct gfs2_inode *ip)
 {
 	struct buffer_head *dibh;
 	int error;
@@ -373,45 +485,49 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
 	error = gfs2_dinode_in(ip, dibh->b_data);
 	brelse(dibh);
 
-	clear_bit(GIF_INVALID, &ip->i_flags);
-
 	return error;
 }
 
 /**
- * inode_go_lock - operation done after an inode lock is locked by a process
- * @gl: the glock
- * @flags:
+ * inode_go_instantiate - read in an inode if necessary
+ * @gl: The glock
  *
  * Returns: errno
  */
 
-static int inode_go_lock(struct gfs2_holder *gh)
+static int inode_go_instantiate(struct gfs2_glock *gl)
+{
+	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_glock *io_gl;
+	int error;
+
+	if (!ip) /* no inode to populate - read it in later */
+		return 0;
+
+	error = gfs2_inode_refresh(ip);
+	if (error)
+		return error;
+	io_gl = ip->i_iopen_gh.gh_gl;
+	io_gl->gl_no_formal_ino = ip->i_no_formal_ino;
+	return 0;
+}
+
+static int inode_go_held(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = gl->gl_object;
 	int error = 0;
 
-	if (!ip || (gh->gh_flags & GL_SKIP))
+	if (!ip) /* no inode to populate - read it in later */
 		return 0;
 
-	if (test_bit(GIF_INVALID, &ip->i_flags)) {
-		error = gfs2_inode_refresh(ip);
-		if (error)
-			return error;
-	}
+	if (gh->gh_state != LM_ST_DEFERRED)
+		inode_dio_wait(&ip->i_inode);
 
 	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
-	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
-		spin_lock(&sdp->sd_trunc_lock);
-		if (list_empty(&ip->i_trunc_list))
-			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
-		spin_unlock(&sdp->sd_trunc_lock);
-		wake_up(&sdp->sd_quota_wait);
-		return 1;
-	}
+	    (gh->gh_state == LM_ST_EXCLUSIVE))
+		error = gfs2_truncatei_resume(ip);
 
 	return error;
 }
 
@@ -419,53 +535,68 @@ static int inode_go_lock(struct gfs2_holder *gh)
 /**
  * inode_go_dump - print information about an inode
  * @seq: The iterator
- * @ip: the inode
+ * @gl: The glock
+ * @fs_id_buf: file system id (may be empty)
  *
- * Returns: 0 on success, -ENOBUFS when we run out of space
 */
 
-static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl,
+			  const char *fs_id_buf)
 {
-	const struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
+	const struct inode *inode = &ip->i_inode;
+
 	if (ip == NULL)
-		return 0;
-	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
+		return;
+
+	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
+		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
-		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
+		       IF2DT(inode->i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
-		       (unsigned long long)i_size_read(&ip->i_inode));
-	return 0;
+		       (unsigned long long)i_size_read(inode),
+		       inode->i_data.nrpages);
 }
 
 /**
- * trans_go_sync - promote/demote the transaction glock
+ * freeze_go_callback - A cluster node is requesting a freeze
  * @gl: the glock
- * @state: the requested state
- * @flags:
- *
+ * @remote: true if this came from a different cluster node
  */
 
-static void trans_go_sync(struct gfs2_glock *gl)
+static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct super_block *sb = sdp->sd_vfs;
 
-	if (gl->gl_state != LM_ST_UNLOCKED &&
-	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
-		gfs2_meta_syncfs(sdp);
-		gfs2_log_shutdown(sdp);
+	if (!remote ||
+	    (gl->gl_state != LM_ST_SHARED &&
+	     gl->gl_state != LM_ST_UNLOCKED) ||
+	    gl->gl_demote_state != LM_ST_UNLOCKED)
+		return;
+
+	/*
+	 * Try to get an active super block reference to prevent racing with
+	 * unmount (see super_trylock_shared()). But note that unmount isn't
+	 * the only place where a write lock on s_umount is taken, and we can
+	 * fail here because of things like remount as well.
+	 */
+	if (down_read_trylock(&sb->s_umount)) {
+		atomic_inc(&sb->s_active);
+		up_read(&sb->s_umount);
+		if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
+			deactivate_super(sb);
 	}
 }
 
 /**
- * trans_go_xmote_bh - After promoting/demoting the transaction glock
+ * freeze_go_xmote_bh - After promoting/demoting the freeze glock
  * @gl: the glock
- *
  */
-
-static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
+static int freeze_go_xmote_bh(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
 	struct gfs2_glock *j_gl = ip->i_gl;
 	struct gfs2_log_header_host head;
@@ -475,51 +606,36 @@
 		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 		error = gfs2_find_jhead(sdp->sd_jdesc, &head);
-		if (error)
-			gfs2_consist(sdp);
-		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
-			gfs2_consist(sdp);
-
-		/* Initialize some head of the log stuff */
-		if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
-			sdp->sd_log_sequence = head.lh_sequence + 1;
-			gfs2_log_pointers_init(sdp, head.lh_blkno);
-		}
+		if (gfs2_assert_withdraw(sdp, !error))
+			return error;
+		if (gfs2_assert_withdraw(sdp, head.lh_flags &
+					 GFS2_LOG_HEAD_UNMOUNT))
+			return -EIO;
+		gfs2_log_pointers_init(sdp, &head);
 	}
 	return 0;
 }
 
 /**
- * trans_go_demote_ok
- * @gl: the glock
- *
- * Always returns 0
- */
-
-static int trans_go_demote_ok(const struct gfs2_glock *gl)
-{
-	return 0;
-}
-
-/**
  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  * @gl: the glock
+ * @remote: true if this came from a different cluster node
  *
- * gl_spin lock is held while calling this
+ * gl_lockref.lock lock is held while calling this
 */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_inode *ip = gl->gl_object;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
+	if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
		return;
 
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
-		gfs2_glock_hold(gl);
-		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
-			gfs2_glock_put_nolock(gl);
+		gl->gl_lockref.count++;
+		if (!gfs2_queue_try_to_evict(gl))
+			gl->gl_lockref.count--;
	}
 }
 
@@ -530,33 +646,33 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_type = LM_TYPE_META,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
 	.go_sync = inode_go_sync,
 	.go_inval = inode_go_inval,
-	.go_demote_ok = inode_go_demote_ok,
-	.go_lock = inode_go_lock,
+	.go_instantiate = inode_go_instantiate,
+	.go_held = inode_go_held,
 	.go_dump = inode_go_dump,
 	.go_type = LM_TYPE_INODE,
-	.go_flags = GLOF_ASPACE,
+	.go_flags = GLOF_ASPACE | GLOF_LVB,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_sync = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
-	.go_lock = gfs2_rgrp_go_lock,
-	.go_unlock = gfs2_rgrp_go_unlock,
-	.go_dump = gfs2_rgrp_dump,
+	.go_instantiate = gfs2_rgrp_go_instantiate,
+	.go_dump = gfs2_rgrp_go_dump,
 	.go_type = LM_TYPE_RGRP,
-	.go_flags = GLOF_ASPACE | GLOF_LVB,
+	.go_flags = GLOF_LVB,
 };
 
-const struct gfs2_glock_operations gfs2_trans_glops = {
-	.go_sync = trans_go_sync,
-	.go_xmote_bh = trans_go_xmote_bh,
-	.go_demote_ok = trans_go_demote_ok,
+const struct gfs2_glock_operations gfs2_freeze_glops = {
+	.go_xmote_bh = freeze_go_xmote_bh,
+	.go_callback = freeze_go_callback,
 	.go_type = LM_TYPE_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
 	.go_type = LM_TYPE_IOPEN,
 	.go_callback = iopen_go_callback,
+	.go_dump = inode_go_dump,
+	.go_subclass = 1,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {