Diffstat (limited to 'fs/gfs2/lock_dlm.c')
-rw-r--r--  fs/gfs2/lock_dlm.c | 247
1 file changed, 147 insertions, 100 deletions
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 54911294687c..b8d249925395 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -15,9 +15,6 @@
 #include <linux/sched/signal.h>
 
 #include "incore.h"
-#include "glock.h"
-#include "glops.h"
-#include "recovery.h"
 #include "util.h"
 #include "sys.h"
 #include "trace_gfs2.h"
@@ -58,6 +55,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
 /**
  * gfs2_update_reply_times - Update locking statistics
  * @gl: The glock to update
+ * @blocking: The operation may have been blocking
  *
  * This assumes that gl->gl_dstamp has been set earlier.
  *
@@ -72,12 +70,12 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
  * TRY_1CB flags are set are classified as non-blocking. All
  * other DLM requests are counted as (potentially) blocking.
  */
-static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
+static inline void gfs2_update_reply_times(struct gfs2_glock *gl,
+                                           bool blocking)
 {
         struct gfs2_pcpu_lkstats *lks;
         const unsigned gltype = gl->gl_name.ln_type;
-        unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
-                         GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
+        unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
         s64 rtt;
 
         preempt_disable();
@@ -119,9 +117,18 @@ static inline void gfs2_update_request_times(struct gfs2_glock *gl)
 static void gdlm_ast(void *arg)
 {
         struct gfs2_glock *gl = arg;
-        unsigned ret = gl->gl_state;
+        bool blocking;
+        unsigned ret;
+
+        blocking = test_bit(GLF_BLOCKING, &gl->gl_flags);
+        gfs2_update_reply_times(gl, blocking);
+        clear_bit(GLF_BLOCKING, &gl->gl_flags);
+
+        /* If the glock is dead, we only react to a dlm_unlock() reply. */
+        if (__lockref_is_dead(&gl->gl_lockref) &&
+            gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+                return;
 
-        gfs2_update_reply_times(gl);
         BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
 
         if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
@@ -129,18 +136,19 @@ static void gdlm_ast(void *arg)
         switch (gl->gl_lksb.sb_status) {
         case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
-                if (gl->gl_ops->go_free)
-                        gl->gl_ops->go_free(gl);
                 gfs2_glock_free(gl);
                 return;
         case -DLM_ECANCEL: /* Cancel while getting lock */
-                ret |= LM_OUT_CANCELED;
+                ret = LM_OUT_CANCELED;
                 goto out;
         case -EAGAIN: /* Try lock fails */
+                ret = LM_OUT_TRY_AGAIN;
+                goto out;
         case -EDEADLK: /* Deadlock detected */
+                ret = LM_OUT_DEADLOCK;
                 goto out;
         case -ETIMEDOUT: /* Canceled due to timeout */
-                ret |= LM_OUT_ERROR;
+                ret = LM_OUT_ERROR;
                 goto out;
         case 0: /* Success */
                 break;
@@ -149,20 +157,22 @@ static void gdlm_ast(void *arg)
         }
 
         ret = gl->gl_req;
-        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
-                if (gl->gl_req == LM_ST_SHARED)
-                        ret = LM_ST_DEFERRED;
-                else if (gl->gl_req == LM_ST_DEFERRED)
-                        ret = LM_ST_SHARED;
-                else
-                        BUG();
-        }
 
-        set_bit(GLF_INITIAL, &gl->gl_flags);
+        /*
+         * The GLF_INITIAL flag is initially set for new glocks.  Upon the
+         * first successful new (non-conversion) request, we clear this flag to
+         * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the
+         * identifier to use for identifying it.
+         *
+         * Any failed initial requests do not create a DLM lock, so we ignore
+         * the gl->gl_lksb.sb_lkid values that come with such requests.
+         */
+
+        clear_bit(GLF_INITIAL, &gl->gl_flags);
         gfs2_glock_complete(gl, ret);
         return;
 
 out:
-        if (!test_bit(GLF_INITIAL, &gl->gl_flags))
+        if (test_bit(GLF_INITIAL, &gl->gl_flags))
                 gl->gl_lksb.sb_lkid = 0;
         gfs2_glock_complete(gl, ret);
 }
@@ -171,6 +181,9 @@ static void gdlm_bast(void *arg, int mode)
 {
         struct gfs2_glock *gl = arg;
 
+        if (__lockref_is_dead(&gl->gl_lockref))
+                return;
+
         switch (mode) {
         case DLM_LOCK_EX:
                 gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -206,8 +219,21 @@ static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
         return -1;
 }
 
+/* Taken from fs/dlm/lock.c. */
+
+static bool middle_conversion(int cur, int req)
+{
+        return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
+               (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
+}
+
+static bool down_conversion(int cur, int req)
+{
+        return !middle_conversion(cur, req) && req < cur;
+}
+
 static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
-                      const int req)
+                      const int req, bool blocking)
 {
         u32 lkf = 0;
@@ -222,23 +248,16 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
                         lkf |= DLM_LKF_NOQUEUEBAST;
         }
 
-        if (gfs_flags & LM_FLAG_PRIORITY) {
-                lkf |= DLM_LKF_NOORDER;
-                lkf |= DLM_LKF_HEADQUE;
-        }
-
-        if (gfs_flags & LM_FLAG_ANY) {
-                if (req == DLM_LOCK_PR)
-                        lkf |= DLM_LKF_ALTCW;
-                else if (req == DLM_LOCK_CW)
-                        lkf |= DLM_LKF_ALTPR;
-                else
-                        BUG();
-        }
-
-        if (gl->gl_lksb.sb_lkid != 0) {
+        if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
                 lkf |= DLM_LKF_CONVERT;
-                if (test_bit(GLF_BLOCKING, &gl->gl_flags))
+
+                /*
+                 * The DLM_LKF_QUECVT flag needs to be set for "first come,
+                 * first served" semantics, but it must only be set for
+                 * "upward" lock conversions or else DLM will reject the
+                 * request as invalid.
+                 */
+                if (blocking)
                         lkf |= DLM_LKF_QUECVT;
         }
 
@@ -258,31 +277,43 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
                      unsigned int flags)
 {
         struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
-        int req;
+        bool blocking;
+        int cur, req;
         u32 lkf;
         char strname[GDLM_STRNAME_BYTES] = "";
         int error;
 
+        gl->gl_req = req_state;
+        cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
         req = make_mode(gl->gl_name.ln_sbd, req_state);
-        lkf = make_flags(gl, flags, req);
+        blocking = !down_conversion(cur, req) &&
+                   !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
+        lkf = make_flags(gl, flags, req, blocking);
+        if (blocking)
+                set_bit(GLF_BLOCKING, &gl->gl_flags);
         gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
         gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
-        if (gl->gl_lksb.sb_lkid) {
-                gfs2_update_request_times(gl);
-        } else {
+        if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
                 memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
                 strname[GDLM_STRNAME_BYTES - 1] = '\0';
                 gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
                 gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
                 gl->gl_dstamp = ktime_get_real();
+        } else {
+                gfs2_update_request_times(gl);
         }
 
         /*
          * Submit the actual lock request.
          */
 
 again:
-        error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
-                         GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+        down_read(&ls->ls_sem);
+        error = -ENODEV;
+        if (likely(ls->ls_dlm != NULL)) {
+                error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
+                                 GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+        }
+        up_read(&ls->ls_sem);
         if (error == -EBUSY) {
                 msleep(20);
                 goto again;
@@ -294,54 +325,75 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 {
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+        uint32_t flags = 0;
         int error;
 
-        if (gl->gl_lksb.sb_lkid == 0)
-                goto out_free;
+        BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+        if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
+                gfs2_glock_free(gl);
+                return;
+        }
 
-        clear_bit(GLF_BLOCKING, &gl->gl_flags);
         gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
         gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
         gfs2_update_request_times(gl);
 
-        /* don't want to call dlm if we've unmounted the lock protocol */
-        if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
-                goto out_free;
-        /* don't want to skip dlm_unlock writing the lvb when lock has one */
+        /*
+         * When the lockspace is released, all remaining glocks will be
+         * unlocked automatically.  This is more efficient than unlocking them
+         * individually, but when the lock is held in DLM_LOCK_EX or
+         * DLM_LOCK_PW mode, the lock value block (LVB) would be lost.
+         */
 
         if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-            !gl->gl_lksb.sb_lvbptr)
-                goto out_free;
+            (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) {
+                gfs2_glock_free_later(gl);
+                return;
+        }
+
+        if (gl->gl_lksb.sb_lvbptr)
+                flags |= DLM_LKF_VALBLK;
 
 again:
-        error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
-                           NULL, gl);
+        down_read(&ls->ls_sem);
+        error = -ENODEV;
+        if (likely(ls->ls_dlm != NULL)) {
+                error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, flags,
+                                   NULL, gl);
+        }
+        up_read(&ls->ls_sem);
         if (error == -EBUSY) {
                 msleep(20);
                 goto again;
         }
 
+        if (error == -ENODEV) {
+                gfs2_glock_free(gl);
+                return;
+        }
+
         if (error) {
                 fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
                        gl->gl_name.ln_type,
                        (unsigned long long)gl->gl_name.ln_number, error);
         }
-        return;
-
-out_free:
-        gfs2_glock_free(gl);
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
 {
         struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
-        dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+
+        down_read(&ls->ls_sem);
+        if (likely(ls->ls_dlm != NULL)) {
+                dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+        }
+        up_read(&ls->ls_sem);
 }
 
 /*
  * dlm/gfs2 recovery coordination using dlm_recover callbacks
  *
- * 0. gfs2 checks for another cluster node withdraw, needing journal replay
  * 1. dlm_controld sees lockspace members change
  * 2. dlm_controld blocks dlm-kernel locking activity
  * 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
@@ -516,7 +568,11 @@ static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
         int error;
 
-        error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+        down_read(&ls->ls_sem);
+        error = -ENODEV;
+        if (likely(ls->ls_dlm != NULL))
+                error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
+        up_read(&ls->ls_sem);
         if (error) {
                 fs_err(sdp, "%s lkid %x error %d\n",
                        name, lksb->sb_lkid, error);
@@ -543,9 +599,14 @@ static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
         memset(strname, 0, GDLM_STRNAME_BYTES);
         snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);
 
-        error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
-                         strname, GDLM_STRNAME_BYTES - 1,
-                         0, sync_wait_cb, ls, NULL);
+        down_read(&ls->ls_sem);
+        error = -ENODEV;
+        if (likely(ls->ls_dlm != NULL)) {
+                error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
+                                 strname, GDLM_STRNAME_BYTES - 1,
+                                 0, sync_wait_cb, ls, NULL);
+        }
+        up_read(&ls->ls_sem);
         if (error) {
                 fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
                        name, lksb->sb_lkid, flags, mode, error);
@@ -590,28 +651,6 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
                            &ls->ls_control_lksb, "control_lock");
 }
 
-/**
- * remote_withdraw - react to a node withdrawing from the file system
- * @sdp: The superblock
- */
-static void remote_withdraw(struct gfs2_sbd *sdp)
-{
-        struct gfs2_jdesc *jd;
-        int ret = 0, count = 0;
-
-        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
-                if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
-                        continue;
-                ret = gfs2_recover_journal(jd, true);
-                if (ret)
-                        break;
-                count++;
-        }
-
-        /* Now drop the additional reference we acquired */
-        fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
-}
-
 static void gfs2_control_func(struct work_struct *work)
 {
         struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
@@ -622,13 +661,6 @@ static void gfs2_control_func(struct work_struct *work)
         int recover_size;
         int i, error;
 
-        /* First check for other nodes that may have done a withdraw. */
-        if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
-                remote_withdraw(sdp);
-                clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
-                return;
-        }
-
         spin_lock(&ls->ls_recover_spin);
         /*
          * No MOUNT_DONE means we're still mounting; control_mount()
@@ -952,14 +984,15 @@ locks_done:
                 if (sdp->sd_args.ar_spectator) {
                         fs_info(sdp, "Recovery is required. Waiting for a "
                                 "non-spectator to mount.\n");
+                        spin_unlock(&ls->ls_recover_spin);
                         msleep_interruptible(1000);
                 } else {
                         fs_info(sdp, "control_mount wait1 block %u start %u "
                                 "mount %u lvb %u flags %lx\n", block_gen,
                                 start_gen, mount_gen, lvb_gen,
                                 ls->ls_recover_flags);
+                        spin_unlock(&ls->ls_recover_spin);
                 }
-                spin_unlock(&ls->ls_recover_spin);
                 goto restart;
         }
@@ -1271,6 +1304,7 @@
          */
 
         INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
+        ls->ls_dlm = NULL;
         spin_lock_init(&ls->ls_recover_spin);
         ls->ls_recover_flags = 0;
         ls->ls_recover_mount = 0;
@@ -1305,6 +1339,7 @@
          * create/join lockspace
          */
 
+        init_rwsem(&ls->ls_sem);
         error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
                                   &gdlm_lockspace_ops, sdp, &ops_result,
                                   &ls->ls_dlm);
@@ -1348,7 +1383,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
         return 0;
 
 fail_release:
-        dlm_release_lockspace(ls->ls_dlm, 2);
+        dlm_release_lockspace(ls->ls_dlm, DLM_RELEASE_NORMAL);
 fail_free:
         free_recover_size(ls);
 fail:
@@ -1368,7 +1403,15 @@ static void gdlm_first_done(struct gfs2_sbd *sdp)
                 fs_err(sdp, "mount first_done error %d\n", error);
 }
 
-static void gdlm_unmount(struct gfs2_sbd *sdp)
+/*
+ * gdlm_unmount - release our lockspace
+ * @sdp: the superblock
+ * @clean: Indicates whether or not the remaining nodes in the cluster should
+ *         perform recovery.  Recovery is necessary when a node withdraws and
+ *         its journal remains dirty.  Recovery isn't necessary when a node
+ *         cleanly unmounts a filesystem.
+ */
+static void gdlm_unmount(struct gfs2_sbd *sdp, bool clean)
 {
         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -1384,10 +1427,14 @@ static void gdlm_unmount(struct gfs2_sbd *sdp)
         /* mounted_lock and control_lock will be purged in dlm recovery */
 
 release:
+        down_write(&ls->ls_sem);
         if (ls->ls_dlm) {
-                dlm_release_lockspace(ls->ls_dlm, 2);
+                dlm_release_lockspace(ls->ls_dlm,
+                                      clean ? DLM_RELEASE_NORMAL :
+                                              DLM_RELEASE_RECOVER);
                 ls->ls_dlm = NULL;
         }
+        up_write(&ls->ls_sem);
 
         free_recover_size(ls);
 }
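Note on the blocking classification: gdlm_lock() now decides up front, from the current and requested DLM modes, whether a request may block, instead of reading GLF_BLOCKING back at reply time. The key property is that PR and CW are incomparable ("middle") modes, so a PR<->CW conversion is never a down-conversion. A minimal userspace sketch of the same tests (plain C with illustrative names; the mode values match include/uapi/linux/dlmconstants.h):

#include <assert.h>
#include <stdbool.h>

/* DLM lock modes, ordered by strength except that PR and CW are incomparable. */
enum { LOCK_NL = 0, LOCK_CR = 1, LOCK_CW = 2, LOCK_PR = 3, LOCK_PW = 4, LOCK_EX = 5 };

static bool middle_conversion(int cur, int req)
{
        return (cur == LOCK_PR && req == LOCK_CW) ||
               (cur == LOCK_CW && req == LOCK_PR);
}

static bool down_conversion(int cur, int req)
{
        return !middle_conversion(cur, req) && req < cur;
}

int main(void)
{
        assert(down_conversion(LOCK_EX, LOCK_NL));   /* EX -> NL is a down-conversion */
        assert(!down_conversion(LOCK_PR, LOCK_CW));  /* PR <-> CW is "middle", not down */
        assert(!down_conversion(LOCK_CW, LOCK_PR));
        assert(!down_conversion(LOCK_NL, LOCK_EX));  /* up-conversions may block */
        return 0;
}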
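Note on ls_sem: the patch brackets every dlm_lock()/dlm_unlock() call with down_read(&ls->ls_sem) plus a NULL check on ls->ls_dlm, while gdlm_unmount() releases the lockspace and clears ls->ls_dlm under down_write(). Callers that race with unmount therefore see -ENODEV instead of dereferencing a released lockspace. A minimal userspace model of this pattern, using a pthread rwlock in place of the rwsem (struct lockstruct, submit_request, and release_lockspace are illustrative names, not the kernel API):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct lockspace { int unused; };          /* stands in for dlm_lockspace_t */

struct lockstruct {
        pthread_rwlock_t ls_sem;           /* models the kernel rwsem */
        struct lockspace *ls_dlm;
};

/* Models gdlm_lock()/gdlm_put_lock(): submit only while the lockspace exists. */
static int submit_request(struct lockstruct *ls)
{
        int error = -ENODEV;

        pthread_rwlock_rdlock(&ls->ls_sem);
        if (ls->ls_dlm != NULL)
                error = 0;                 /* a dlm_lock() call would go here */
        pthread_rwlock_unlock(&ls->ls_sem);
        return error;
}

/* Models gdlm_unmount(): detach the lockspace under the write lock. */
static void release_lockspace(struct lockstruct *ls)
{
        pthread_rwlock_wrlock(&ls->ls_sem);
        if (ls->ls_dlm != NULL) {
                /* a dlm_release_lockspace() call would go here */
                ls->ls_dlm = NULL;
        }
        pthread_rwlock_unlock(&ls->ls_sem);
}

int main(void)
{
        static struct lockspace space;
        struct lockstruct ls = { .ls_dlm = &space };

        pthread_rwlock_init(&ls.ls_sem, NULL);
        int before = submit_request(&ls);  /* 0: lockspace attached */
        release_lockspace(&ls);
        int after = submit_request(&ls);   /* -ENODEV: no use-after-free */
        pthread_rwlock_destroy(&ls.ls_sem);
        return before == 0 && after == -ENODEV ? 0 : 1;
}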
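Note on the retry loops: both gdlm_lock() and gdlm_put_lock() keep the existing pattern where -EBUSY from the DLM (a previous operation on the same lock has not completed yet) causes the request to be resubmitted after a 20 ms sleep. A sketch of that pattern in userspace C, under the assumption that submit_with_retry and fake_submit are illustrative stand-ins rather than kernel API:

#include <errno.h>
#include <unistd.h>

/* submit() stands in for a dlm_lock() or dlm_unlock() call. */
static int submit_with_retry(int (*submit)(void))
{
        int error;

        for (;;) {
                error = submit();
                if (error != -EBUSY)       /* -EBUSY: previous op still in flight */
                        return error;
                usleep(20 * 1000);         /* the kernel code uses msleep(20) */
        }
}

/* Toy stand-in that is busy twice and then succeeds. */
static int fake_submit(void)
{
        static int calls;
        return ++calls < 3 ? -EBUSY : 0;
}

int main(void)
{
        return submit_with_retry(fake_submit);
}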
