Diffstat (limited to 'fs/dlm')
38 files changed, 7493 insertions, 5544 deletions
diff --git a/fs/dlm/Kconfig b/fs/dlm/Kconfig
index e4242c3f8486..b46165df5a91 100644
--- a/fs/dlm/Kconfig
+++ b/fs/dlm/Kconfig
@@ -1,8 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
 menuconfig DLM
 	tristate "Distributed Lock Manager (DLM)"
 	depends on INET
 	depends on SYSFS && CONFIGFS_FS && (IPV6 || IPV6=n)
-	select IP_SCTP
 	help
 	  A general purpose distributed lock manager for kernel or userspace
 	  applications.
diff --git a/fs/dlm/Makefile b/fs/dlm/Makefile
index ca1c9124c8ce..5a471af1d1fe 100644
--- a/fs/dlm/Makefile
+++ b/fs/dlm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_DLM) +=		dlm.o
 dlm-y :=			ast.o \
 				config.o \
@@ -8,7 +9,6 @@ dlm-y :=			ast.o \
 				member.o \
 				memory.o \
 				midcomms.o \
-				netlink.o \
 				lowcomms.o \
 				plock.o \
 				rcom.o \
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 27a6ba9aaeec..0fe8d80ce5e8 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -1,65 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /******************************************************************************
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
 **  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
 **
-**  This copyrighted material is made available to anyone wishing to use,
-**  modify, copy, or redistribute it subject to the terms and conditions
-**  of the GNU General Public License v.2.
 **
 *******************************************************************************
 ******************************************************************************/
 
+#include <trace/events/dlm.h>
+
 #include "dlm_internal.h"
+#include "lvb_table.h"
+#include "memory.h"
 #include "lock.h"
 #include "user.h"
+#include "ast.h"
+
+static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
+			     uint32_t flags, uint8_t sb_flags, int sb_status,
+			     struct dlm_lksb *lksb,
+			     void (*astfn)(void *astparam),
+			     void (*bastfn)(void *astparam, int mode),
+			     void *astparam, const char *res_name,
+			     size_t res_length)
+{
+	if (flags & DLM_CB_BAST) {
+		trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
+		bastfn(astparam, mode);
+	} else if (flags & DLM_CB_CAST) {
+		trace_dlm_ast(ls_id, lkb_id, sb_flags, sb_status, res_name,
+			      res_length);
+		lksb->sb_status = sb_status;
+		lksb->sb_flags = sb_flags;
+		astfn(astparam);
+	}
+}
 
-static uint64_t dlm_cb_seq;
-static DEFINE_SPINLOCK(dlm_cb_seq_spin);
+static void dlm_do_callback(struct dlm_callback *cb)
+{
+	dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
+			 cb->sb_flags, cb->sb_status, cb->lkb_lksb,
+			 cb->astfn, cb->bastfn, cb->astparam,
+			 cb->res_name, cb->res_length);
+	dlm_free_cb(cb);
+}
 
-static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
+static void dlm_callback_work(struct work_struct *work)
 {
-	int i;
-
-	log_print("last_bast %x %llu flags %x mode %d sb %d %x",
-		  lkb->lkb_id,
-		  (unsigned long long)lkb->lkb_last_bast.seq,
-		  lkb->lkb_last_bast.flags,
-		  lkb->lkb_last_bast.mode,
-		  lkb->lkb_last_bast.sb_status,
-		  lkb->lkb_last_bast.sb_flags);
-
-	log_print("last_cast %x %llu flags %x mode %d sb %d %x",
-		  lkb->lkb_id,
-		  (unsigned long long)lkb->lkb_last_cast.seq,
-		  lkb->lkb_last_cast.flags,
-		  lkb->lkb_last_cast.mode,
-		  lkb->lkb_last_cast.sb_status,
-		  lkb->lkb_last_cast.sb_flags);
-
-	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
-		log_print("cb %x %llu flags %x mode %d sb %d %x",
-			  lkb->lkb_id,
-			  (unsigned long long)lkb->lkb_callbacks[i].seq,
-			  lkb->lkb_callbacks[i].flags,
-			  lkb->lkb_callbacks[i].mode,
-			  lkb->lkb_callbacks[i].sb_status,
-			  lkb->lkb_callbacks[i].sb_flags);
-	}
+	struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
+
+	dlm_do_callback(cb);
 }
 
-int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			 int status, uint32_t sbflags, uint64_t seq)
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags, int *copy_lvb)
 {
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-	uint64_t prev_seq;
+	struct dlm_rsb *rsb = lkb->lkb_resource;
+	struct dlm_ls *ls = rsb->res_ls;
 	int prev_mode;
-	int i, rv;
 
-	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
-		if (lkb->lkb_callbacks[i].seq)
-			continue;
+	if (copy_lvb)
+		*copy_lvb = 0;
+
+	if (flags & DLM_CB_BAST) {
+		/* if cb is a bast, it should be skipped if the blocking mode is
+		 * compatible with the last granted mode
+		 */
+		if (lkb->lkb_last_cast_cb_mode != -1) {
+			if (dlm_modes_compat(mode, lkb->lkb_last_cast_cb_mode)) {
+				log_debug(ls, "skip %x bast mode %d for cast mode %d",
+					  lkb->lkb_id, mode,
+					  lkb->lkb_last_cast_cb_mode);
+				return true;
+			}
+		}
 
 		/*
 		 * Suppress some redundant basts here, do more on removal.
@@ -67,210 +83,130 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		 * is a bast for the same mode or a more restrictive mode.
 		 * (the addional > PR check is needed for PR/CW inversion)
 		 */
-
-		if ((i > 0) && (flags & DLM_CB_BAST) &&
-		    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {
-
-			prev_seq = lkb->lkb_callbacks[i-1].seq;
-			prev_mode = lkb->lkb_callbacks[i-1].mode;
+		if (lkb->lkb_last_cb_mode != -1 &&
+		    lkb->lkb_last_cb_flags & DLM_CB_BAST) {
+			prev_mode = lkb->lkb_last_cb_mode;
 
 			if ((prev_mode == mode) ||
 			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
+				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
+					  lkb->lkb_id, mode, prev_mode);
+				return true;
+			}
+		}
 
-				log_debug(ls, "skip %x add bast %llu mode %d "
-					  "for bast %llu mode %d",
-					  lkb->lkb_id,
-					  (unsigned long long)seq,
-					  mode,
-					  (unsigned long long)prev_seq,
-					  prev_mode);
-				rv = 0;
-				goto out;
+		lkb->lkb_last_bast_time = ktime_get();
+		lkb->lkb_last_bast_cb_mode = mode;
+	} else if (flags & DLM_CB_CAST) {
+		if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+			prev_mode = lkb->lkb_last_cast_cb_mode;
+
+			if (!status && lkb->lkb_lksb->sb_lvbptr &&
+			    dlm_lvb_operations[prev_mode + 1][mode + 1]) {
+				if (copy_lvb)
+					*copy_lvb = 1;
 			}
 		}
 
-		lkb->lkb_callbacks[i].seq = seq;
-		lkb->lkb_callbacks[i].flags = flags;
-		lkb->lkb_callbacks[i].mode = mode;
-		lkb->lkb_callbacks[i].sb_status = status;
-		lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
-		rv = 0;
-		break;
+		lkb->lkb_last_cast_cb_mode = mode;
+		lkb->lkb_last_cast_time = ktime_get();
 	}
 
-	if (i == DLM_CALLBACKS_SIZE) {
-		log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
-			  lkb->lkb_id, (unsigned long long)seq,
-			  flags, mode, status, sbflags);
-		dlm_dump_lkb_callbacks(lkb);
-		rv = -1;
-		goto out;
-	}
- out:
-	return rv;
+	lkb->lkb_last_cb_mode = mode;
+	lkb->lkb_last_cb_flags = flags;
+
+	return false;
 }
 
-int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
-			 struct dlm_callback *cb, int *resid)
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+	       int status, uint32_t sbflags,
+	       struct dlm_callback **cb)
 {
-	int i, rv;
+	struct dlm_rsb *rsb = lkb->lkb_resource;
+	struct dlm_ls *ls = rsb->res_ls;
 
-	*resid = 0;
+	*cb = dlm_allocate_cb();
+	if (WARN_ON_ONCE(!*cb))
+		return -ENOMEM;
 
-	if (!lkb->lkb_callbacks[0].seq) {
-		rv = -ENOENT;
-		goto out;
-	}
+	/* for tracing */
+	(*cb)->lkb_id = lkb->lkb_id;
+	(*cb)->ls_id = ls->ls_global_id;
+	memcpy((*cb)->res_name, rsb->res_name, rsb->res_length);
+	(*cb)->res_length = rsb->res_length;
 
-	/* oldest undelivered cb is callbacks[0] */
+	(*cb)->flags = flags;
+	(*cb)->mode = mode;
+	(*cb)->sb_status = status;
+	(*cb)->sb_flags = (sbflags & 0x000000FF);
+	(*cb)->lkb_lksb = lkb->lkb_lksb;
 
-	memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
-	memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));
+	return 0;
+}
 
-	/* shift others down */
+static int dlm_get_queue_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			    int status, uint32_t sbflags,
+			    struct dlm_callback **cb)
+{
+	int rv;
 
-	for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
-		if (!lkb->lkb_callbacks[i].seq)
-			break;
-		memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
-		       sizeof(struct dlm_callback));
-		memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
-		(*resid)++;
-	}
+	rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
+	if (rv)
+		return rv;
 
-	/* if cb is a bast, it should be skipped if the blocking mode is
-	   compatible with the last granted mode */
-
-	if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
-		if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
-			cb->flags |= DLM_CB_SKIP;
-
-			log_debug(ls, "skip %x bast %llu mode %d "
-				  "for cast %llu mode %d",
-				  lkb->lkb_id,
-				  (unsigned long long)cb->seq,
-				  cb->mode,
-				  (unsigned long long)lkb->lkb_last_cast.seq,
-				  lkb->lkb_last_cast.mode);
-			rv = 0;
-			goto out;
-		}
-	}
+	(*cb)->astfn = lkb->lkb_astfn;
+	(*cb)->bastfn = lkb->lkb_bastfn;
+	(*cb)->astparam = lkb->lkb_astparam;
+	INIT_WORK(&(*cb)->work, dlm_callback_work);
 
-	if (cb->flags & DLM_CB_CAST) {
-		memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
-		lkb->lkb_last_cast_time = ktime_get();
-	}
-
-	if (cb->flags & DLM_CB_BAST) {
-		memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
-		lkb->lkb_last_bast_time = ktime_get();
-	}
-	rv = 0;
- out:
-	return rv;
+	return 0;
 }
 
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		uint32_t sbflags)
 {
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-	uint64_t new_seq, prev_seq;
+	struct dlm_rsb *rsb = lkb->lkb_resource;
+	struct dlm_ls *ls = rsb->res_ls;
+	struct dlm_callback *cb;
 	int rv;
 
-	spin_lock(&dlm_cb_seq_spin);
-	new_seq = ++dlm_cb_seq;
-	spin_unlock(&dlm_cb_seq_spin);
-
-	if (lkb->lkb_flags & DLM_IFL_USER) {
-		dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
+	if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) {
+		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
 		return;
 	}
 
-	mutex_lock(&lkb->lkb_cb_mutex);
-	prev_seq = lkb->lkb_callbacks[0].seq;
-
-	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
-	if (rv < 0)
-		goto out;
-
-	if (!prev_seq) {
-		kref_get(&lkb->lkb_ref);
+	if (dlm_may_skip_callback(lkb, flags, mode, status, sbflags, NULL))
+		return;
 
-		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
-			mutex_lock(&ls->ls_cb_mutex);
-			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
-			mutex_unlock(&ls->ls_cb_mutex);
+	spin_lock_bh(&ls->ls_cb_lock);
+	if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
+		rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+		if (!rv)
+			list_add(&cb->list, &ls->ls_cb_delay);
+	} else {
+		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags)) {
+			dlm_run_callback(ls->ls_global_id, lkb->lkb_id, mode, flags,
+					 sbflags, status, lkb->lkb_lksb,
+					 lkb->lkb_astfn, lkb->lkb_bastfn,
+					 lkb->lkb_astparam, rsb->res_name,
+					 rsb->res_length);
 		} else {
-			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+			rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+			if (!rv)
+				queue_work(ls->ls_callback_wq, &cb->work);
 		}
 	}
- out:
-	mutex_unlock(&lkb->lkb_cb_mutex);
-}
-
-void dlm_callback_work(struct work_struct *work)
-{
-	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
-	void (*castfn) (void *astparam);
-	void (*bastfn) (void *astparam, int mode);
-	struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
-	int i, rv, resid;
-
-	memset(&callbacks, 0, sizeof(callbacks));
-
-	mutex_lock(&lkb->lkb_cb_mutex);
-	if (!lkb->lkb_callbacks[0].seq) {
-		/* no callback work exists, shouldn't happen */
-		log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
-		dlm_print_lkb(lkb);
-		dlm_dump_lkb_callbacks(lkb);
-	}
-
-	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
-		rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
-		if (rv < 0)
-			break;
-	}
-
-	if (resid) {
-		/* cbs remain, loop should have removed all, shouldn't happen */
-		log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
-			  resid);
-		dlm_print_lkb(lkb);
-		dlm_dump_lkb_callbacks(lkb);
-	}
-	mutex_unlock(&lkb->lkb_cb_mutex);
-
-	castfn = lkb->lkb_astfn;
-	bastfn = lkb->lkb_bastfn;
-
-	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
-		if (!callbacks[i].seq)
-			break;
-		if (callbacks[i].flags & DLM_CB_SKIP) {
-			continue;
-		} else if (callbacks[i].flags & DLM_CB_BAST) {
-			bastfn(lkb->lkb_astparam, callbacks[i].mode);
-		} else if (callbacks[i].flags & DLM_CB_CAST) {
-			lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
-			lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
-			castfn(lkb->lkb_astparam);
-		}
-	}
-
-	/* undo kref_get from dlm_add_callback, may cause lkb to be freed */
-	dlm_put_lkb(lkb);
+	spin_unlock_bh(&ls->ls_cb_lock);
 }
 
 int dlm_callback_start(struct dlm_ls *ls)
 {
-	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
-					     WQ_UNBOUND |
-					     WQ_MEM_RECLAIM |
-					     WQ_NON_REENTRANT,
-					     0);
+	if (!test_bit(LSFL_FS, &ls->ls_flags) ||
+	    test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+		return 0;
+
+	ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
+						     WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!ls->ls_callback_wq) {
 		log_print("can't start dlm_callback workqueue");
 		return -ENOMEM;
@@ -286,31 +222,54 @@ void dlm_callback_stop(struct dlm_ls *ls)
 
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
+	if (!test_bit(LSFL_FS, &ls->ls_flags))
+		return;
+
+	spin_lock_bh(&ls->ls_cb_lock);
 	set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+	spin_unlock_bh(&ls->ls_cb_lock);
 
 	if (ls->ls_callback_wq)
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
-	struct dlm_lkb *lkb, *safe;
-	int count = 0;
-
-	clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
+	struct dlm_callback *cb, *safe;
+	int count = 0, sum = 0;
+	bool empty;
 
-	if (!ls->ls_callback_wq)
+	if (!test_bit(LSFL_FS, &ls->ls_flags))
 		return;
 
-	mutex_lock(&ls->ls_cb_mutex);
-	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
-		list_del_init(&lkb->lkb_cb_list);
-		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+more:
+	spin_lock_bh(&ls->ls_cb_lock);
+	list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
+		list_del(&cb->list);
+		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+			dlm_do_callback(cb);
+		else
+			queue_work(ls->ls_callback_wq, &cb->work);
+
+		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
+	}
+	empty = list_empty(&ls->ls_cb_delay);
+	if (empty)
+		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
+	spin_unlock_bh(&ls->ls_cb_lock);
+
+	sum += count;
+	if (!empty) {
+		count = 0;
+		cond_resched();
+		goto more;
 	}
-	mutex_unlock(&ls->ls_cb_mutex);
 
-	if (count)
-		log_debug(ls, "dlm_callback_resume %d", count);
+	if (sum)
+		log_rinfo(ls, "%s %d", __func__, sum);
 }
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 757b551c6820..e2b86845d331 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -1,11 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /******************************************************************************
 *******************************************************************************
 **
 **  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
 **
-**  This copyrighted material is made available to anyone wishing to use,
-**  modify, copy, or redistribute it subject to the terms and conditions
-**  of the GNU General Public License v.2.
 **
 *******************************************************************************
 ******************************************************************************/
@@ -13,15 +11,14 @@
 #ifndef __ASTD_DOT_H__
 #define __ASTD_DOT_H__
 
-void dlm_del_ast(struct dlm_lkb *lkb);
-int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			 int status, uint32_t sbflags, uint64_t seq);
-int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
-			 struct dlm_callback *cb, int *resid);
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags, int *copy_lvb);
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+	       int status, uint32_t sbflags,
+	       struct dlm_callback **cb);
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		uint32_t sbflags);
-void dlm_callback_work(struct work_struct *work);
 int dlm_callback_start(struct dlm_ls *ls);
 void dlm_callback_stop(struct dlm_ls *ls);
 void dlm_callback_suspend(struct dlm_ls *ls);
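The ast.c/ast.h changes above replace the fixed per-lock callback array (DLM_CALLBACKS_SIZE slots plus a global sequence counter) with one dynamically allocated struct dlm_callback per event, delivered either inline on the softirq-safe path or through an ordered workqueue. What follows is a minimal, self-contained sketch of that pattern, not code from the patch; the demo_* names are invented for illustration.

/* Sketch only: one work item per queued callback, freed after delivery. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_cb {
	struct work_struct work;
	int status;			/* result to deliver */
	void (*astfn)(void *arg);	/* completion callback */
	void *arg;
};

static struct workqueue_struct *demo_wq;

static void demo_cb_work(struct work_struct *work)
{
	struct demo_cb *cb = container_of(work, struct demo_cb, work);

	cb->astfn(cb->arg);	/* run the callback in process context */
	kfree(cb);		/* each record is freed after delivery */
}

/* queue one callback; mirrors the dlm_get_queue_cb() + queue_work() flow */
static int demo_queue_cb(void (*astfn)(void *), void *arg, int status)
{
	struct demo_cb *cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		return -ENOMEM;
	cb->astfn = astfn;
	cb->arg = arg;
	cb->status = status;
	INIT_WORK(&cb->work, demo_cb_work);
	queue_work(demo_wq, &cb->work);
	return 0;
}

static void demo_ast(void *arg)
{
	pr_info("demo callback for %s\n", (char *)arg);
}

static int __init demo_init(void)
{
	/* an ordered queue preserves delivery order, as in dlm_callback_start() */
	demo_wq = alloc_ordered_workqueue("demo_cb", WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!demo_wq)
		return -ENOMEM;
	return demo_queue_cb(demo_ast, "lock A", 0);
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* flushes any pending callbacks */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The design point the patch makes is visible in the sketch: because each callback owns its work item, there is no fixed-size array to overflow and no lkb reference pinned while callbacks are pending.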
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 76feb4b60fa6..a0d75b5c83c6 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -1,18 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /******************************************************************************
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
 **  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
-**  This copyrighted material is made available to anyone wishing to use,
-**  modify, copy, or redistribute it subject to the terms and conditions
-**  of the GNU General Public License v.2.
 **
 *******************************************************************************
 ******************************************************************************/
 
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/configfs.h>
 #include <linux/slab.h>
 #include <linux/in.h>
@@ -22,12 +20,14 @@
 #include <net/sock.h>
 
 #include "config.h"
+#include "midcomms.h"
 #include "lowcomms.h"
 
 /*
- * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/nodeid (refers to <node>)
  * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
- * /config/dlm/<cluster>/comms/<comm>/nodeid
+ * /config/dlm/<cluster>/spaces/<space>/nodes/<node>/release_recover
+ * /config/dlm/<cluster>/comms/<comm>/nodeid (refers to <comm>)
  * /config/dlm/<cluster>/comms/<comm>/local
  * /config/dlm/<cluster>/comms/<comm>/addr      (write only)
  * /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
@@ -61,53 +61,29 @@ static struct config_item *make_node(struct config_group *, const char *);
 static void drop_node(struct config_group *, struct config_item *);
 static void release_node(struct config_item *);
 
-static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
-			    char *buf);
-static ssize_t store_cluster(struct config_item *i,
-			     struct configfs_attribute *a,
-			     const char *buf, size_t len);
-static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
-			 char *buf);
-static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
-			  const char *buf, size_t len);
-static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
-			 char *buf);
-static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
-			  const char *buf, size_t len);
-
-static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf);
-static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
-				 size_t len);
-static ssize_t comm_local_read(struct dlm_comm *cm, char *buf);
-static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
-				size_t len);
-static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
-			       size_t len);
-static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf);
-static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
-static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
-				 size_t len);
-static ssize_t node_weight_read(struct dlm_node *nd, char *buf);
-static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
-				 size_t len);
+static struct configfs_attribute *comm_attrs[];
+static struct configfs_attribute *node_attrs[];
+
+const struct rhashtable_params dlm_rhash_rsb_params = {
+	.nelem_hint = 3, /* start small */
+	.key_len = DLM_RESNAME_MAXLEN,
+	.key_offset = offsetof(struct dlm_rsb, res_name),
+	.head_offset = offsetof(struct dlm_rsb, res_node),
+	.automatic_shrinking = true,
+};
 
 struct dlm_cluster {
 	struct config_group group;
-	unsigned int cl_tcp_port;
-	unsigned int cl_buffer_size;
-	unsigned int cl_rsbtbl_size;
-	unsigned int cl_recover_timer;
-	unsigned int cl_toss_secs;
-	unsigned int cl_scan_secs;
-	unsigned int cl_log_debug;
-	unsigned int cl_protocol;
-	unsigned int cl_timewarn_cs;
-	unsigned int cl_waitwarn_us;
-	unsigned int cl_new_rsb_count;
-	unsigned int cl_recover_callbacks;
-	char cl_cluster_name[DLM_LOCKSPACE_LEN];
+	struct dlm_spaces *sps;
+	struct dlm_comms *cms;
 };
 
+static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
+{
+	return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
+		   NULL;
+}
+
 enum {
 	CLUSTER_ATTR_TCP_PORT = 0,
 	CLUSTER_ATTR_BUFFER_SIZE,
@@ -116,102 +92,168 @@ enum {
 	CLUSTER_ATTR_TOSS_SECS,
 	CLUSTER_ATTR_SCAN_SECS,
 	CLUSTER_ATTR_LOG_DEBUG,
+	CLUSTER_ATTR_LOG_INFO,
 	CLUSTER_ATTR_PROTOCOL,
-	CLUSTER_ATTR_TIMEWARN_CS,
-	CLUSTER_ATTR_WAITWARN_US,
+	CLUSTER_ATTR_MARK,
 	CLUSTER_ATTR_NEW_RSB_COUNT,
 	CLUSTER_ATTR_RECOVER_CALLBACKS,
 	CLUSTER_ATTR_CLUSTER_NAME,
 };
 
-struct cluster_attribute {
-	struct configfs_attribute attr;
-	ssize_t (*show)(struct dlm_cluster *, char *);
-	ssize_t (*store)(struct dlm_cluster *, const char *, size_t);
-};
-
-static ssize_t cluster_cluster_name_read(struct dlm_cluster *cl, char *buf)
+static ssize_t cluster_cluster_name_show(struct config_item *item, char *buf)
 {
-	return sprintf(buf, "%s\n", cl->cl_cluster_name);
+	return sprintf(buf, "%s\n", dlm_config.ci_cluster_name);
 }
 
-static ssize_t cluster_cluster_name_write(struct dlm_cluster *cl,
+static ssize_t cluster_cluster_name_store(struct config_item *item,
 					  const char *buf, size_t len)
 {
-	strlcpy(dlm_config.ci_cluster_name, buf,
-		sizeof(dlm_config.ci_cluster_name));
-	strlcpy(cl->cl_cluster_name, buf, sizeof(cl->cl_cluster_name));
+	strscpy(dlm_config.ci_cluster_name, buf,
+		sizeof(dlm_config.ci_cluster_name));
 	return len;
 }
 
-static struct cluster_attribute cluster_attr_cluster_name = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "cluster_name",
-		    .ca_mode = S_IRUGO | S_IWUSR },
-	.show   = cluster_cluster_name_read,
-	.store  = cluster_cluster_name_write,
-};
+CONFIGFS_ATTR(cluster_, cluster_name);
 
-static ssize_t cluster_set(struct dlm_cluster *cl, unsigned int *cl_field,
-			   int *info_field, int check_zero,
-			   const char *buf, size_t len)
+static ssize_t cluster_tcp_port_show(struct config_item *item, char *buf)
 {
-	unsigned int x;
+	return sprintf(buf, "%u\n", be16_to_cpu(dlm_config.ci_tcp_port));
+}
+
+static int dlm_check_zero_and_dlm_running(unsigned int x)
+{
+	if (!x)
+		return -EINVAL;
+
+	if (dlm_lowcomms_is_running())
+		return -EBUSY;
+
+	return 0;
+}
+
+static ssize_t cluster_tcp_port_store(struct config_item *item,
+				      const char *buf, size_t len)
+{
+	int rc;
+	u16 x;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	x = simple_strtoul(buf, NULL, 0);
+	rc = kstrtou16(buf, 0, &x);
+	if (rc)
+		return rc;
 
-	if (check_zero && !x)
-		return -EINVAL;
+	rc = dlm_check_zero_and_dlm_running(x);
+	if (rc)
+		return rc;
+
+	dlm_config.ci_tcp_port = cpu_to_be16(x);
+	return len;
+}
+
+CONFIGFS_ATTR(cluster_, tcp_port);
+
+static ssize_t cluster_set(unsigned int *info_field,
+			   int (*check_cb)(unsigned int x),
+			   const char *buf, size_t len)
+{
+	unsigned int x;
+	int rc;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	rc = kstrtouint(buf, 0, &x);
+	if (rc)
+		return rc;
+
+	if (check_cb) {
+		rc = check_cb(x);
+		if (rc)
+			return rc;
+	}
 
-	*cl_field = x;
 	*info_field = x;
 
 	return len;
 }
 
-#define CLUSTER_ATTR(name, check_zero)                                        \
-static ssize_t name##_write(struct dlm_cluster *cl, const char *buf, size_t len) \
+#define CLUSTER_ATTR(name, check_cb)                                          \
+static ssize_t cluster_##name##_store(struct config_item *item,               \
+				      const char *buf, size_t len)            \
 {                                                                             \
-	return cluster_set(cl, &cl->cl_##name, &dlm_config.ci_##name,        \
-			   check_zero, buf, len);                             \
+	return cluster_set(&dlm_config.ci_##name, check_cb, buf, len);        \
 }                                                                             \
-static ssize_t name##_read(struct dlm_cluster *cl, char *buf)                 \
+static ssize_t cluster_##name##_show(struct config_item *item, char *buf)     \
 {                                                                             \
-	return snprintf(buf, PAGE_SIZE, "%u\n", cl->cl_##name);               \
+	return snprintf(buf, PAGE_SIZE, "%u\n", dlm_config.ci_##name);        \
 }                                                                             \
-static struct cluster_attribute cluster_attr_##name =                         \
-__CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
-
-CLUSTER_ATTR(tcp_port, 1);
-CLUSTER_ATTR(buffer_size, 1);
-CLUSTER_ATTR(rsbtbl_size, 1);
-CLUSTER_ATTR(recover_timer, 1);
-CLUSTER_ATTR(toss_secs, 1);
-CLUSTER_ATTR(scan_secs, 1);
-CLUSTER_ATTR(log_debug, 0);
-CLUSTER_ATTR(protocol, 0);
-CLUSTER_ATTR(timewarn_cs, 1);
-CLUSTER_ATTR(waitwarn_us, 0);
-CLUSTER_ATTR(new_rsb_count, 0);
-CLUSTER_ATTR(recover_callbacks, 0);
+CONFIGFS_ATTR(cluster_, name);
+
+static int dlm_check_protocol_and_dlm_running(unsigned int x)
+{
+	switch (x) {
+	case 0:
+		/* TCP */
+		break;
+	case 1:
+		/* SCTP */
+		if (!IS_ENABLED(CONFIG_IP_SCTP))
+			return -EOPNOTSUPP;
+
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (dlm_lowcomms_is_running())
+		return -EBUSY;
+
+	return 0;
+}
+
+static int dlm_check_zero(unsigned int x)
+{
+	if (!x)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int dlm_check_buffer_size(unsigned int x)
+{
+	if (x < DLM_MAX_SOCKET_BUFSIZE)
+		return -EINVAL;
+
+	return 0;
+}
+
+CLUSTER_ATTR(buffer_size, dlm_check_buffer_size);
+CLUSTER_ATTR(rsbtbl_size, dlm_check_zero);
+CLUSTER_ATTR(recover_timer, dlm_check_zero);
+CLUSTER_ATTR(toss_secs, dlm_check_zero);
+CLUSTER_ATTR(scan_secs, dlm_check_zero);
+CLUSTER_ATTR(log_debug, NULL);
+CLUSTER_ATTR(log_info, NULL);
+CLUSTER_ATTR(protocol, dlm_check_protocol_and_dlm_running);
+CLUSTER_ATTR(mark, NULL);
+CLUSTER_ATTR(new_rsb_count, NULL);
+CLUSTER_ATTR(recover_callbacks, NULL);
 
 static struct configfs_attribute *cluster_attrs[] = {
-	[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
-	[CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
-	[CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
-	[CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
-	[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
-	[CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs.attr,
-	[CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug.attr,
-	[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr,
-	[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
-	[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
-	[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
-	[CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks.attr,
-	[CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name.attr,
+	[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port,
+	[CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size,
+	[CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size,
+	[CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer,
+	[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs,
+	[CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs,
+	[CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug,
+	[CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info,
+	[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol,
+	[CLUSTER_ATTR_MARK] = &cluster_attr_mark,
+	[CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count,
+	[CLUSTER_ATTR_RECOVER_CALLBACKS] = &cluster_attr_recover_callbacks,
+	[CLUSTER_ATTR_CLUSTER_NAME] = &cluster_attr_cluster_name,
 	NULL,
 };
 
@@ -220,83 +262,13 @@ enum {
 	COMM_ATTR_LOCAL,
 	COMM_ATTR_ADDR,
 	COMM_ATTR_ADDR_LIST,
-};
-
-struct comm_attribute {
-	struct configfs_attribute attr;
-	ssize_t (*show)(struct dlm_comm *, char *);
-	ssize_t (*store)(struct dlm_comm *, const char *, size_t);
-};
-
-static struct comm_attribute comm_attr_nodeid = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "nodeid",
-		    .ca_mode = S_IRUGO | S_IWUSR },
-	.show   = comm_nodeid_read,
-	.store  = comm_nodeid_write,
-};
-
-static struct comm_attribute comm_attr_local = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "local",
-		    .ca_mode = S_IRUGO | S_IWUSR },
-	.show   = comm_local_read,
-	.store  = comm_local_write,
-};
-
-static struct comm_attribute comm_attr_addr = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "addr",
-		    .ca_mode = S_IWUSR },
-	.store  = comm_addr_write,
-};
-
-static struct comm_attribute comm_attr_addr_list = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "addr_list",
-		    .ca_mode = S_IRUGO },
-	.show   = comm_addr_list_read,
-};
-
-static struct configfs_attribute *comm_attrs[] = {
-	[COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
-	[COMM_ATTR_LOCAL] = &comm_attr_local.attr,
-	[COMM_ATTR_ADDR] = &comm_attr_addr.attr,
-	[COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr,
-	NULL,
+	COMM_ATTR_MARK,
 };
 
 enum {
 	NODE_ATTR_NODEID = 0,
 	NODE_ATTR_WEIGHT,
-};
-
-struct node_attribute {
-	struct configfs_attribute attr;
-	ssize_t (*show)(struct dlm_node *, char *);
-	ssize_t (*store)(struct dlm_node *, const char *, size_t);
-};
-
-static struct node_attribute node_attr_nodeid = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "nodeid",
-		    .ca_mode = S_IRUGO | S_IWUSR },
-	.show   = node_nodeid_read,
-	.store  = node_nodeid_write,
-};
-
-static struct node_attribute node_attr_weight = {
-	.attr   = { .ca_owner = THIS_MODULE,
-		    .ca_name = "weight",
-		    .ca_mode = S_IRUGO | S_IWUSR },
-	.show   = node_weight_read,
-	.store  = node_weight_write,
-};
-
-static struct configfs_attribute *node_attrs[] = {
-	[NODE_ATTR_NODEID] = &node_attr_nodeid.attr,
-	[NODE_ATTR_WEIGHT] = &node_attr_weight.attr,
-	NULL,
+	NODE_ATTR_RELEASE_RECOVER,
 };
 
 struct dlm_clusters {
@@ -310,8 +282,11 @@ struct dlm_spaces {
 struct dlm_space {
 	struct config_group group;
 	struct list_head members;
+	struct list_head members_gone;
+	int members_gone_count;
 	struct mutex members_lock;
 	int members_count;
+	struct dlm_nodes *nds;
 };
 
 struct dlm_comms {
@@ -324,6 +299,7 @@ struct dlm_comm {
 	int nodeid;
 	int local;
 	int addr_count;
+	unsigned int mark;
 	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
 };
 
@@ -338,6 +314,14 @@ struct dlm_node {
 	int weight;
 	int new;
 	int comm_seq; /* copy of cm->seq when nd->nodeid is set */
+	unsigned int release_recover;
+};
+
+struct dlm_member_gone {
+	int nodeid;
+	unsigned int release_recover;
+
+	struct list_head list; /* space->members_gone */
 };
 
 static struct configfs_group_operations clusters_ops = {
@@ -347,8 +331,6 @@ static struct configfs_group_operations clusters_ops = {
 
 static struct configfs_item_operations cluster_ops = {
 	.release = release_cluster,
-	.show_attribute = show_cluster,
-	.store_attribute = store_cluster,
 };
 
 static struct configfs_group_operations spaces_ops = {
@@ -367,8 +349,6 @@ static struct configfs_group_operations comms_ops = {
 
 static struct configfs_item_operations comm_ops = {
 	.release = release_comm,
-	.show_attribute = show_comm,
-	.store_attribute = store_comm,
 };
 
 static struct configfs_group_operations nodes_ops = {
@@ -378,59 +358,51 @@ static struct configfs_group_operations nodes_ops = {
 
 static struct configfs_item_operations node_ops = {
 	.release = release_node,
-	.show_attribute = show_node,
-	.store_attribute = store_node,
 };
 
-static struct config_item_type clusters_type = {
+static const struct config_item_type clusters_type = {
 	.ct_group_ops = &clusters_ops,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type cluster_type = {
+static const struct config_item_type cluster_type = {
 	.ct_item_ops = &cluster_ops,
 	.ct_attrs = cluster_attrs,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type spaces_type = {
+static const struct config_item_type spaces_type = {
 	.ct_group_ops = &spaces_ops,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type space_type = {
+static const struct config_item_type space_type = {
 	.ct_item_ops = &space_ops,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type comms_type = {
+static const struct config_item_type comms_type = {
 	.ct_group_ops = &comms_ops,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type comm_type = {
+static const struct config_item_type comm_type = {
 	.ct_item_ops = &comm_ops,
 	.ct_attrs = comm_attrs,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type nodes_type = {
+static const struct config_item_type nodes_type = {
 	.ct_group_ops = &nodes_ops,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct config_item_type node_type = {
+static const struct config_item_type node_type = {
 	.ct_item_ops = &node_ops,
 	.ct_attrs = node_attrs,
 	.ct_owner = THIS_MODULE,
 };
 
-static struct dlm_cluster *config_item_to_cluster(struct config_item *i)
-{
-	return i ? container_of(to_config_group(i), struct dlm_cluster, group) :
-		   NULL;
-}
-
 static struct dlm_space *config_item_to_space(struct config_item *i)
 {
 	return i ? container_of(to_config_group(i), struct dlm_space, group) :
@@ -453,39 +425,23 @@ static struct config_group *make_cluster(struct config_group *g,
 	struct dlm_cluster *cl = NULL;
 	struct dlm_spaces *sps = NULL;
 	struct dlm_comms *cms = NULL;
-	void *gps = NULL;
 
 	cl = kzalloc(sizeof(struct dlm_cluster), GFP_NOFS);
-	gps = kcalloc(3, sizeof(struct config_group *), GFP_NOFS);
 	sps = kzalloc(sizeof(struct dlm_spaces), GFP_NOFS);
 	cms = kzalloc(sizeof(struct dlm_comms), GFP_NOFS);
 
-	if (!cl || !gps || !sps || !cms)
+	if (!cl || !sps || !cms)
 		goto fail;
 
+	cl->sps = sps;
+	cl->cms = cms;
+
 	config_group_init_type_name(&cl->group, name, &cluster_type);
 	config_group_init_type_name(&sps->ss_group, "spaces", &spaces_type);
 	config_group_init_type_name(&cms->cs_group, "comms", &comms_type);
 
-	cl->group.default_groups = gps;
-	cl->group.default_groups[0] = &sps->ss_group;
-	cl->group.default_groups[1] = &cms->cs_group;
-	cl->group.default_groups[2] = NULL;
-
-	cl->cl_tcp_port = dlm_config.ci_tcp_port;
-	cl->cl_buffer_size = dlm_config.ci_buffer_size;
-	cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
-	cl->cl_recover_timer = dlm_config.ci_recover_timer;
-	cl->cl_toss_secs = dlm_config.ci_toss_secs;
-	cl->cl_scan_secs = dlm_config.ci_scan_secs;
-	cl->cl_log_debug = dlm_config.ci_log_debug;
-	cl->cl_protocol = dlm_config.ci_protocol;
-	cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
-	cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
-	cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
-	cl->cl_recover_callbacks = dlm_config.ci_recover_callbacks;
-	memcpy(cl->cl_cluster_name, dlm_config.ci_cluster_name,
-	       DLM_LOCKSPACE_LEN);
+	configfs_add_default_group(&sps->ss_group, &cl->group);
+	configfs_add_default_group(&cms->cs_group, &cl->group);
 
 	space_list = &sps->ss_group;
 	comm_list = &cms->cs_group;
@@ -493,7 +449,6 @@ static struct config_group *make_cluster(struct config_group *g,
 
  fail:
 	kfree(cl);
-	kfree(gps);
 	kfree(sps);
 	kfree(cms);
 	return ERR_PTR(-ENOMEM);
@@ -502,14 +457,8 @@ static struct config_group *make_cluster(struct config_group *g,
 static void drop_cluster(struct config_group *g, struct config_item *i)
 {
 	struct dlm_cluster *cl = config_item_to_cluster(i);
-	struct config_item *tmp;
-	int j;
 
-	for (j = 0; cl->group.default_groups[j]; j++) {
-		tmp = &cl->group.default_groups[j]->cg_item;
-		cl->group.default_groups[j] = NULL;
-		config_item_put(tmp);
-	}
+	configfs_remove_default_groups(&cl->group);
 
 	space_list = NULL;
 	comm_list = NULL;
@@ -520,7 +469,9 @@ static void drop_cluster(struct config_group *g, struct config_item *i)
 static void release_cluster(struct config_item *i)
 {
 	struct dlm_cluster *cl = config_item_to_cluster(i);
-	kfree(cl->group.default_groups);
+
+	kfree(cl->sps);
+	kfree(cl->cms);
 	kfree(cl);
 }
 
@@ -528,30 +479,27 @@
 static struct config_group *make_space(struct config_group *g, const char *name)
 {
 	struct dlm_space *sp = NULL;
 	struct dlm_nodes *nds = NULL;
-	void *gps = NULL;
 
 	sp = kzalloc(sizeof(struct dlm_space), GFP_NOFS);
-	gps = kcalloc(2, sizeof(struct config_group *), GFP_NOFS);
 	nds = kzalloc(sizeof(struct dlm_nodes), GFP_NOFS);
 
-	if (!sp || !gps || !nds)
+	if (!sp || !nds)
 		goto fail;
 
 	config_group_init_type_name(&sp->group, name, &space_type);
-	config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
 
-	sp->group.default_groups = gps;
-	sp->group.default_groups[0] = &nds->ns_group;
-	sp->group.default_groups[1] = NULL;
+	config_group_init_type_name(&nds->ns_group, "nodes", &nodes_type);
+	configfs_add_default_group(&nds->ns_group, &sp->group);
 
 	INIT_LIST_HEAD(&sp->members);
+	INIT_LIST_HEAD(&sp->members_gone);
 	mutex_init(&sp->members_lock);
 	sp->members_count = 0;
+	sp->nds = nds;
 	return &sp->group;
 
  fail:
 	kfree(sp);
-	kfree(gps);
 	kfree(nds);
 	return ERR_PTR(-ENOMEM);
 }
@@ -559,30 +507,29 @@ static struct config_group *make_space(struct config_group *g, const char *name)
 static void drop_space(struct config_group *g, struct config_item *i)
 {
 	struct dlm_space *sp = config_item_to_space(i);
-	struct config_item *tmp;
-	int j;
 
 	/* assert list_empty(&sp->members) */
 
-	for (j = 0; sp->group.default_groups[j]; j++) {
-		tmp = &sp->group.default_groups[j]->cg_item;
-		sp->group.default_groups[j] = NULL;
-		config_item_put(tmp);
-	}
-
+	configfs_remove_default_groups(&sp->group);
 	config_item_put(i);
 }
 
 static void release_space(struct config_item *i)
 {
 	struct dlm_space *sp = config_item_to_space(i);
-	kfree(sp->group.default_groups);
+
+	kfree(sp->nds);
 	kfree(sp);
 }
 
 static struct config_item *make_comm(struct config_group *g, const char *name)
 {
 	struct dlm_comm *cm;
+	unsigned int nodeid;
+	int rv;
+
+	rv = kstrtouint(name, 0, &nodeid);
+	if (rv)
+		return ERR_PTR(rv);
 
 	cm = kzalloc(sizeof(struct dlm_comm), GFP_NOFS);
 	if (!cm)
@@ -594,9 +541,10 @@ static struct config_item *make_comm(struct config_group *g, const char *name)
 	if (!cm->seq)
 		cm->seq = dlm_comm_count++;
 
-	cm->nodeid = -1;
+	cm->nodeid = nodeid;
 	cm->local = 0;
 	cm->addr_count = 0;
+	cm->mark = 0;
 	return &cm->item;
 }
 
@@ -605,7 +553,7 @@ static void drop_comm(struct config_group *g, struct config_item *i)
 	struct dlm_comm *cm = config_item_to_comm(i);
 	if (local_comm == cm)
 		local_comm = NULL;
-	dlm_lowcomms_close(cm->nodeid);
+	dlm_midcomms_close(cm->nodeid);
 	while (cm->addr_count--)
 		kfree(cm->addr[cm->addr_count]);
 	config_item_put(i);
@@ -620,16 +568,25 @@ static void release_comm(struct config_item *i)
 
 static struct config_item *make_node(struct config_group *g, const char *name)
 {
 	struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
+	unsigned int nodeid;
 	struct dlm_node *nd;
+	uint32_t seq = 0;
+	int rv;
+
+	rv = kstrtouint(name, 0, &nodeid);
+	if (rv)
+		return ERR_PTR(rv);
 
 	nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
 	if (!nd)
 		return ERR_PTR(-ENOMEM);
 
 	config_item_init_type_name(&nd->item, name, &node_type);
-	nd->nodeid = -1;
+	nd->nodeid = nodeid;
 	nd->weight = 1;  /* default weight of 1 if none is set */
 	nd->new = 1;     /* set to 0 once it's been read by dlm_nodeid_list() */
+	dlm_comm_seq(nodeid, &seq, true);
+	nd->comm_seq = seq;
 
 	mutex_lock(&sp->members_lock);
 	list_add(&nd->list, &sp->members);
@@ -643,10 +600,20 @@ static void drop_node(struct config_group *g, struct config_item *i)
 {
 	struct dlm_space *sp = config_item_to_space(g->cg_item.ci_parent);
 	struct dlm_node *nd = config_item_to_node(i);
+	struct dlm_member_gone *mb_gone;
+
+	mb_gone = kzalloc(sizeof(*mb_gone), GFP_KERNEL);
+	if (!mb_gone)
+		return;
 
 	mutex_lock(&sp->members_lock);
 	list_del(&nd->list);
 	sp->members_count--;
+
+	mb_gone->nodeid = nd->nodeid;
+	mb_gone->release_recover = nd->release_recover;
+	list_add(&mb_gone->list, &sp->members_gone);
+	sp->members_gone_count++;
 	mutex_unlock(&sp->members_lock);
 
 	config_item_put(i);
@@ -685,71 +652,46 @@ void dlm_config_exit(void)
  * Functions for user space to read/write attributes
  */
 
-static ssize_t show_cluster(struct config_item *i, struct configfs_attribute *a,
-			    char *buf)
-{
-	struct dlm_cluster *cl = config_item_to_cluster(i);
-	struct cluster_attribute *cla =
-		container_of(a, struct cluster_attribute, attr);
-	return cla->show ? cla->show(cl, buf) : 0;
-}
-
-static ssize_t store_cluster(struct config_item *i,
-			     struct configfs_attribute *a,
-			     const char *buf, size_t len)
+static ssize_t comm_nodeid_show(struct config_item *item, char *buf)
 {
-	struct dlm_cluster *cl = config_item_to_cluster(i);
-	struct cluster_attribute *cla =
-		container_of(a, struct cluster_attribute, attr);
-	return cla->store ? cla->store(cl, buf, len) : -EINVAL;
-}
-
-static ssize_t show_comm(struct config_item *i, struct configfs_attribute *a,
-			 char *buf)
-{
-	struct dlm_comm *cm = config_item_to_comm(i);
-	struct comm_attribute *cma =
-		container_of(a, struct comm_attribute, attr);
-	return cma->show ? cma->show(cm, buf) : 0;
-}
+	unsigned int nodeid;
+	int rv;
 
-static ssize_t store_comm(struct config_item *i, struct configfs_attribute *a,
-			  const char *buf, size_t len)
-{
-	struct dlm_comm *cm = config_item_to_comm(i);
-	struct comm_attribute *cma =
-		container_of(a, struct comm_attribute, attr);
-	return cma->store ? cma->store(cm, buf, len) : -EINVAL;
-}
+	rv = kstrtouint(config_item_name(item), 0, &nodeid);
+	if (WARN_ON(rv))
+		return rv;
 
-static ssize_t comm_nodeid_read(struct dlm_comm *cm, char *buf)
-{
-	return sprintf(buf, "%d\n", cm->nodeid);
+	return sprintf(buf, "%u\n", nodeid);
 }
 
-static ssize_t comm_nodeid_write(struct dlm_comm *cm, const char *buf,
+static ssize_t comm_nodeid_store(struct config_item *item, const char *buf,
 				 size_t len)
 {
-	cm->nodeid = simple_strtol(buf, NULL, 0);
 	return len;
 }
 
-static ssize_t comm_local_read(struct dlm_comm *cm, char *buf)
+static ssize_t comm_local_show(struct config_item *item, char *buf)
 {
-	return sprintf(buf, "%d\n", cm->local);
+	return sprintf(buf, "%d\n", config_item_to_comm(item)->local);
}
 
-static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
+static ssize_t comm_local_store(struct config_item *item, const char *buf,
 				size_t len)
 {
-	cm->local= simple_strtol(buf, NULL, 0);
+	struct dlm_comm *cm = config_item_to_comm(item);
+	int rc = kstrtoint(buf, 0, &cm->local);
+
+	if (rc)
+		return rc;
 	if (cm->local && !local_comm)
 		local_comm = cm;
 	return len;
 }
 
-static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
+static ssize_t comm_addr_store(struct config_item *item, const char *buf,
+			       size_t len)
 {
+	struct dlm_comm *cm = config_item_to_comm(item);
 	struct sockaddr_storage *addr;
 	int rv;
 
@@ -765,7 +707,7 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
 
 	memcpy(addr, buf, len);
 
-	rv = dlm_lowcomms_addr(cm->nodeid, addr, len);
+	rv = dlm_midcomms_addr(cm->nodeid, addr);
 	if (rv) {
 		kfree(addr);
 		return rv;
@@ -775,8 +717,9 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
 	return len;
 }
 
-static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf)
+static ssize_t comm_addr_list_show(struct config_item *item, char *buf)
 {
+	struct dlm_comm *cm = config_item_to_comm(item);
 	ssize_t s;
 	ssize_t allowance;
 	int i;
@@ -819,51 +762,113 @@ static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf)
 	return 4096 - allowance;
 }
 
-static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
-			 char *buf)
+static ssize_t comm_mark_show(struct config_item *item, char *buf)
 {
-	struct dlm_node *nd = config_item_to_node(i);
-	struct node_attribute *nda =
-		container_of(a, struct node_attribute, attr);
-	return nda->show ? nda->show(nd, buf) : 0;
+	return sprintf(buf, "%u\n", config_item_to_comm(item)->mark);
 }
 
-static ssize_t store_node(struct config_item *i, struct configfs_attribute *a,
-			  const char *buf, size_t len)
+static ssize_t comm_mark_store(struct config_item *item, const char *buf,
+			       size_t len)
 {
-	struct dlm_node *nd = config_item_to_node(i);
-	struct node_attribute *nda =
-		container_of(a, struct node_attribute, attr);
-	return nda->store ? nda->store(nd, buf, len) : -EINVAL;
+	struct dlm_comm *comm;
+	unsigned int mark;
+	int rc;
+
+	rc = kstrtouint(buf, 0, &mark);
+	if (rc)
+		return rc;
+
+	if (mark == 0)
+		mark = dlm_config.ci_mark;
+
+	comm = config_item_to_comm(item);
+	rc = dlm_lowcomms_nodes_set_mark(comm->nodeid, mark);
+	if (rc)
+		return rc;
+
+	comm->mark = mark;
+	return len;
 }
 
-static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf)
+CONFIGFS_ATTR(comm_, nodeid);
+CONFIGFS_ATTR(comm_, local);
+CONFIGFS_ATTR(comm_, mark);
+CONFIGFS_ATTR_WO(comm_, addr);
+CONFIGFS_ATTR_RO(comm_, addr_list);
+
+static struct configfs_attribute *comm_attrs[] = {
+	[COMM_ATTR_NODEID] = &comm_attr_nodeid,
+	[COMM_ATTR_LOCAL] = &comm_attr_local,
+	[COMM_ATTR_ADDR] = &comm_attr_addr,
+	[COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list,
+	[COMM_ATTR_MARK] = &comm_attr_mark,
+	NULL,
+};
+
+static ssize_t node_nodeid_show(struct config_item *item, char *buf)
 {
-	return sprintf(buf, "%d\n", nd->nodeid);
+	unsigned int nodeid;
+	int rv;
+
+	rv = kstrtouint(config_item_name(item), 0, &nodeid);
+	if (WARN_ON(rv))
+		return rv;
+
+	return sprintf(buf, "%u\n", nodeid);
 }
 
-static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
+static ssize_t node_nodeid_store(struct config_item *item, const char *buf,
 				 size_t len)
 {
-	uint32_t seq = 0;
-	nd->nodeid = simple_strtol(buf, NULL, 0);
-	dlm_comm_seq(nd->nodeid, &seq);
-	nd->comm_seq = seq;
 	return len;
 }
 
-static ssize_t node_weight_read(struct dlm_node *nd, char *buf)
+static ssize_t node_weight_show(struct config_item *item, char *buf)
 {
-	return sprintf(buf, "%d\n", nd->weight);
+	return sprintf(buf, "%d\n", config_item_to_node(item)->weight);
 }
 
-static ssize_t node_weight_write(struct dlm_node *nd, const char *buf,
+static ssize_t node_weight_store(struct config_item *item, const char *buf,
 				 size_t len)
 {
-	nd->weight = simple_strtol(buf, NULL, 0);
+	int rc = kstrtoint(buf, 0, &config_item_to_node(item)->weight);
+
+	if (rc)
+		return rc;
 	return len;
 }
 
+static ssize_t node_release_recover_show(struct config_item *item, char *buf)
+{
+	struct dlm_node *n = config_item_to_node(item);
+
+	return sprintf(buf, "%u\n", n->release_recover);
+}
+
+static ssize_t node_release_recover_store(struct config_item *item,
+					  const char *buf, size_t len)
+{
+	struct dlm_node *n = config_item_to_node(item);
+	int rc;
+
+	rc = kstrtouint(buf, 0, &n->release_recover);
+	if (rc)
+		return rc;
+
+	return len;
+}
+
+CONFIGFS_ATTR(node_, nodeid);
+CONFIGFS_ATTR(node_, weight);
+CONFIGFS_ATTR(node_, release_recover);
+
+static struct configfs_attribute *node_attrs[] = {
+	[NODE_ATTR_NODEID] = &node_attr_nodeid,
+	[NODE_ATTR_WEIGHT] = &node_attr_weight,
+	[NODE_ATTR_RELEASE_RECOVER] = &node_attr_release_recover,
+	NULL,
+};
+
 /*
  * Functions for the dlm to get the info that's been configured
  */
@@ -896,7 +901,7 @@ static struct dlm_comm *get_comm(int nodeid)
 	if (!comm_list)
 		return NULL;
 
-	mutex_lock(&clusters_root.subsys.su_mutex);
+	WARN_ON_ONCE(!mutex_is_locked(&clusters_root.subsys.su_mutex));
 
 	list_for_each_entry(i, &comm_list->cg_children, ci_entry) {
 		cm = config_item_to_comm(i);
@@ -907,7 +912,6 @@ static struct dlm_comm *get_comm(int nodeid)
 		config_item_get(i);
 		break;
 	}
-	mutex_unlock(&clusters_root.subsys.su_mutex);
 
 	if (!found)
 		cm = NULL;
@@ -923,9 +927,10 @@ static void put_comm(struct dlm_comm *cm)
 int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
 		     int *count_out)
 {
+	struct dlm_member_gone *mb_gone, *mb_safe;
+	struct dlm_config_node *nodes, *node;
 	struct dlm_space *sp;
 	struct dlm_node *nd;
-	struct dlm_config_node *nodes, *node;
 	int rv, count;
 
 	sp = get_space(lsname);
@@ -939,7 +944,7 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
 		goto out;
 	}
 
-	count = sp->members_count;
+	count = sp->members_count + sp->members_gone_count;
 
 	nodes = kcalloc(count, sizeof(struct dlm_config_node), GFP_NOFS);
 	if (!nodes) {
@@ -958,6 +963,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
 		nd->new = 0;
 	}
 
+	/* we delay the remove on nodes until here as configfs does
+	 * not support addtional attributes for rmdir().
+	 */
+	list_for_each_entry_safe(mb_gone, mb_safe, &sp->members_gone, list) {
+		node->nodeid = mb_gone->nodeid;
+		node->release_recover = mb_gone->release_recover;
+		node->gone = true;
+		node++;
+
+		list_del(&mb_gone->list);
+		sp->members_gone_count--;
+		kfree(mb_gone);
+	}
+
 	*count_out = count;
 	*nodes_out = nodes;
 	rv = 0;
@@ -967,11 +986,20 @@ int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
 	return rv;
 }
 
-int dlm_comm_seq(int nodeid, uint32_t *seq)
+int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked)
 {
-	struct dlm_comm *cm = get_comm(nodeid);
+	struct dlm_comm *cm;
+
+	if (locked) {
+		cm = get_comm(nodeid);
+	} else {
+		mutex_lock(&clusters_root.subsys.su_mutex);
+		cm = get_comm(nodeid);
+		mutex_unlock(&clusters_root.subsys.su_mutex);
+	}
 	if (!cm)
-		return -EEXIST;
+		return -ENOENT;
+
 	*seq = cm->seq;
 	put_comm(cm);
 	return 0;
@@ -979,7 +1007,7 @@ int dlm_comm_seq(int nodeid, uint32_t *seq)
 
 int dlm_our_nodeid(void)
 {
-	return local_comm ? local_comm->nodeid : 0;
+	return local_comm->nodeid;
 }
 
 /* num 0 is first addr, num 1 is second addr */
@@ -995,30 +1023,29 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
 
 /* Config file defaults */
 #define DEFAULT_TCP_PORT       21064
-#define DEFAULT_BUFFER_SIZE     4096
 #define DEFAULT_RSBTBL_SIZE     1024
 #define DEFAULT_RECOVER_TIMER      5
 #define DEFAULT_TOSS_SECS         10
 #define DEFAULT_SCAN_SECS          5
 #define DEFAULT_LOG_DEBUG          0
-#define DEFAULT_PROTOCOL           0
-#define DEFAULT_TIMEWARN_CS      500 /* 5 sec = 500 centiseconds */
-#define DEFAULT_WAITWARN_US        0
+#define DEFAULT_LOG_INFO           1
+#define DEFAULT_PROTOCOL           DLM_PROTO_TCP
+#define DEFAULT_MARK               0
 #define DEFAULT_NEW_RSB_COUNT    128
 #define DEFAULT_RECOVER_CALLBACKS  0
 #define DEFAULT_CLUSTER_NAME      ""
 
 struct dlm_config_info dlm_config = {
-	.ci_tcp_port = DEFAULT_TCP_PORT,
-	.ci_buffer_size = DEFAULT_BUFFER_SIZE,
+	.ci_tcp_port = cpu_to_be16(DEFAULT_TCP_PORT),
+	.ci_buffer_size = DLM_MAX_SOCKET_BUFSIZE,
 	.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
 	.ci_recover_timer = DEFAULT_RECOVER_TIMER,
 	.ci_toss_secs = DEFAULT_TOSS_SECS,
 	.ci_scan_secs = DEFAULT_SCAN_SECS,
 	.ci_log_debug = DEFAULT_LOG_DEBUG,
+	.ci_log_info = DEFAULT_LOG_INFO,
 	.ci_protocol = DEFAULT_PROTOCOL,
-	.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
-	.ci_waitwarn_us = DEFAULT_WAITWARN_US,
+	.ci_mark = DEFAULT_MARK,
 	.ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT,
 	.ci_recover_callbacks = DEFAULT_RECOVER_CALLBACKS,
 	.ci_cluster_name = DEFAULT_CLUSTER_NAME
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index f30697bc2780..4ebd45f75276 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /******************************************************************************
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
 **  Copyright (C) 2004-2011 Red Hat, Inc.  All rights reserved.
 **
-**  This copyrighted material is made available to anyone wishing to use,
-**  modify, copy, or redistribute it subject to the terms and conditions
-**  of the GNU General Public License v.2.
 **
 *******************************************************************************
 ******************************************************************************/
@@ -14,28 +12,37 @@
 #ifndef __CONFIG_DOT_H__
 #define __CONFIG_DOT_H__
 
+#define DLM_MAX_SOCKET_BUFSIZE 4096
+
 struct dlm_config_node {
 	int nodeid;
 	int weight;
+	bool gone;
 	int new;
 	uint32_t comm_seq;
+	unsigned int release_recover;
 };
 
-#define DLM_MAX_ADDR_COUNT 3
+extern const struct rhashtable_params dlm_rhash_rsb_params;
+
+#define DLM_MAX_ADDR_COUNT 8
+
+#define DLM_PROTO_TCP	0
+#define DLM_PROTO_SCTP	1
 
 struct dlm_config_info {
-	int ci_tcp_port;
-	int ci_buffer_size;
-	int ci_rsbtbl_size;
-	int ci_recover_timer;
-	int ci_toss_secs;
-	int ci_scan_secs;
-	int ci_log_debug;
-	int ci_protocol;
-	int ci_timewarn_cs;
-	int ci_waitwarn_us;
-	int ci_new_rsb_count;
-	int ci_recover_callbacks;
+	__be16 ci_tcp_port;
+	unsigned int ci_buffer_size;
+	unsigned int ci_rsbtbl_size;
+	unsigned int ci_recover_timer;
+	unsigned int ci_toss_secs;
+	unsigned int ci_scan_secs;
+	unsigned int ci_log_debug;
+	unsigned int ci_log_info;
+	unsigned int ci_protocol;
+	unsigned int ci_mark;
+	unsigned int ci_new_rsb_count;
+	unsigned int ci_recover_callbacks;
 	char ci_cluster_name[DLM_LOCKSPACE_LEN];
 };
 
@@ -45,7 +52,7 @@ int dlm_config_init(void);
 void dlm_config_exit(void);
 int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out,
 		     int *count_out);
-int dlm_comm_seq(int nodeid, uint32_t *seq);
+int dlm_comm_seq(int nodeid, uint32_t *seq, bool locked);
 int dlm_our_nodeid(void);
 int dlm_our_addr(struct sockaddr_storage *addr, int num);
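The config.c conversion above drops the homegrown cluster_attribute/comm_attribute/node_attribute wrappers and their show_cluster/store_cluster style dispatchers in favor of the generic configfs helpers, and replaces simple_strtoul() with the checked kstrto*() parsers. A hedged, self-contained sketch of that convention follows; the demo_ names are invented and this is not code from the patch.

/* Sketch only: modern configfs attribute wiring. */
#include <linux/configfs.h>
#include <linux/kernel.h>

static unsigned int demo_timeout = 5;

static ssize_t demo_timeout_show(struct config_item *item, char *buf)
{
	return sprintf(buf, "%u\n", demo_timeout);
}

static ssize_t demo_timeout_store(struct config_item *item, const char *buf,
				  size_t len)
{
	unsigned int x;
	int rc;

	/* kstrtouint() rejects malformed input, unlike simple_strtoul() */
	rc = kstrtouint(buf, 0, &x);
	if (rc)
		return rc;
	if (!x)		/* same role as the patch's dlm_check_zero() */
		return -EINVAL;

	demo_timeout = x;
	return len;
}

/* emits "struct configfs_attribute demo_attr_timeout" bound to the
 * demo_timeout_show()/demo_timeout_store() pair above */
CONFIGFS_ATTR(demo_, timeout);

static struct configfs_attribute *demo_attrs[] = {
	&demo_attr_timeout,
	NULL,	/* array is plugged into a config_item_type via .ct_attrs */
};

This is exactly the shape the new comm_attrs[] and node_attrs[] arrays take above: the configfs core calls the generated show/store hooks directly, so the per-type show_attribute/store_attribute dispatch methods can be deleted.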
** ******************************************************************************* ******************************************************************************/ #include <linux/pagemap.h> #include <linux/seq_file.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/slab.h> #include "dlm_internal.h" +#include "midcomms.h" #include "lock.h" +#include "ast.h" #define DLM_DEBUG_BUF_LEN 4096 static char debug_buf[DLM_DEBUG_BUF_LEN]; static struct mutex debug_buf_lock; static struct dentry *dlm_root; +static struct dentry *dlm_comms; static char *print_lockmode(int mode) { @@ -48,8 +49,8 @@ static char *print_lockmode(int mode) } } -static int print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb, - struct dlm_rsb *res) +static void print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb, + struct dlm_rsb *res) { seq_printf(s, "%08x %s", lkb->lkb_id, print_lockmode(lkb->lkb_grmode)); @@ -68,21 +69,17 @@ static int print_format1_lock(struct seq_file *s, struct dlm_lkb *lkb, if (lkb->lkb_wait_type) seq_printf(s, " wait_type: %d", lkb->lkb_wait_type); - return seq_printf(s, "\n"); + seq_putc(s, '\n'); } -static int print_format1(struct dlm_rsb *res, struct seq_file *s) +static void print_format1(struct dlm_rsb *res, struct seq_file *s) { struct dlm_lkb *lkb; int i, lvblen = res->res_ls->ls_lvblen, recover_list, root_list; - int rv; lock_rsb(res); - rv = seq_printf(s, "\nResource %p Name (len=%d) \"", - res, res->res_length); - if (rv) - goto out; + seq_printf(s, "\nResource %p Name (len=%d) \"", res, res->res_length); for (i = 0; i < res->res_length; i++) { if (isprint(res->res_name[i])) @@ -92,32 +89,31 @@ static int print_format1(struct dlm_rsb *res, struct seq_file *s) } if (res->res_nodeid > 0) - rv = seq_printf(s, "\" \nLocal Copy, Master is node %d\n", - res->res_nodeid); + seq_printf(s, "\"\nLocal Copy, Master is node %d\n", + res->res_nodeid); else if (res->res_nodeid == 0) - rv = seq_printf(s, "\" \nMaster Copy\n"); + seq_puts(s, "\"\nMaster Copy\n"); else if (res->res_nodeid == -1) - rv = seq_printf(s, "\" \nLooking up master (lkid %x)\n", - res->res_first_lkid); + seq_printf(s, "\"\nLooking up master (lkid %x)\n", + res->res_first_lkid); else - rv = seq_printf(s, "\" \nInvalid master %d\n", - res->res_nodeid); - if (rv) + seq_printf(s, "\"\nInvalid master %d\n", res->res_nodeid); + if (seq_has_overflowed(s)) goto out; /* Print the LVB: */ if (res->res_lvbptr) { - seq_printf(s, "LVB: "); + seq_puts(s, "LVB: "); for (i = 0; i < lvblen; i++) { if (i == lvblen / 2) - seq_printf(s, "\n "); + seq_puts(s, "\n "); seq_printf(s, "%02x ", (unsigned char) res->res_lvbptr[i]); } if (rsb_flag(res, RSB_VALNOTVALID)) - seq_printf(s, " (INVALID)"); - rv = seq_printf(s, "\n"); - if (rv) + seq_puts(s, " (INVALID)"); + seq_putc(s, '\n'); + if (seq_has_overflowed(s)) goto out; } @@ -125,59 +121,57 @@ static int print_format1(struct dlm_rsb *res, struct seq_file *s) recover_list = !list_empty(&res->res_recover_list); if (root_list || recover_list) { - rv = seq_printf(s, "Recovery: root %d recover %d flags %lx " - "count %d\n", root_list, recover_list, - res->res_flags, res->res_recover_locks_count); - if (rv) - goto out; + seq_printf(s, "Recovery: root %d recover %d flags %lx count %d\n", + root_list, recover_list, + res->res_flags, res->res_recover_locks_count); } /* Print the locks attached to this resource */ - seq_printf(s, "Granted Queue\n"); + seq_puts(s, "Granted Queue\n"); list_for_each_entry(lkb, &res->res_grantqueue, 
lkb_statequeue) { - rv = print_format1_lock(s, lkb, res); - if (rv) + print_format1_lock(s, lkb, res); + if (seq_has_overflowed(s)) goto out; } - seq_printf(s, "Conversion Queue\n"); + seq_puts(s, "Conversion Queue\n"); list_for_each_entry(lkb, &res->res_convertqueue, lkb_statequeue) { - rv = print_format1_lock(s, lkb, res); - if (rv) + print_format1_lock(s, lkb, res); + if (seq_has_overflowed(s)) goto out; } - seq_printf(s, "Waiting Queue\n"); + seq_puts(s, "Waiting Queue\n"); list_for_each_entry(lkb, &res->res_waitqueue, lkb_statequeue) { - rv = print_format1_lock(s, lkb, res); - if (rv) + print_format1_lock(s, lkb, res); + if (seq_has_overflowed(s)) goto out; } if (list_empty(&res->res_lookup)) goto out; - seq_printf(s, "Lookup Queue\n"); + seq_puts(s, "Lookup Queue\n"); list_for_each_entry(lkb, &res->res_lookup, lkb_rsb_lookup) { - rv = seq_printf(s, "%08x %s", lkb->lkb_id, - print_lockmode(lkb->lkb_rqmode)); + seq_printf(s, "%08x %s", + lkb->lkb_id, print_lockmode(lkb->lkb_rqmode)); if (lkb->lkb_wait_type) seq_printf(s, " wait_type: %d", lkb->lkb_wait_type); - rv = seq_printf(s, "\n"); + seq_putc(s, '\n'); + if (seq_has_overflowed(s)) + goto out; } out: unlock_rsb(res); - return rv; } -static int print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb, - struct dlm_rsb *r) +static void print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb, + struct dlm_rsb *r) { u64 xid = 0; u64 us; - int rv; - if (lkb->lkb_flags & DLM_IFL_USER) { + if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { if (lkb->lkb_ua) xid = lkb->lkb_ua->xid; } @@ -188,103 +182,97 @@ static int print_format2_lock(struct seq_file *s, struct dlm_lkb *lkb, /* id nodeid remid pid xid exflags flags sts grmode rqmode time_us r_nodeid r_len r_name */ - rv = seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n", - lkb->lkb_id, - lkb->lkb_nodeid, - lkb->lkb_remid, - lkb->lkb_ownpid, - (unsigned long long)xid, - lkb->lkb_exflags, - lkb->lkb_flags, - lkb->lkb_status, - lkb->lkb_grmode, - lkb->lkb_rqmode, - (unsigned long long)us, - r->res_nodeid, - r->res_length, - r->res_name); - return rv; + seq_printf(s, "%x %d %x %u %llu %x %x %d %d %d %llu %u %d \"%s\"\n", + lkb->lkb_id, + lkb->lkb_nodeid, + lkb->lkb_remid, + lkb->lkb_ownpid, + (unsigned long long)xid, + lkb->lkb_exflags, + dlm_iflags_val(lkb), + lkb->lkb_status, + lkb->lkb_grmode, + lkb->lkb_rqmode, + (unsigned long long)us, + r->res_nodeid, + r->res_length, + r->res_name); } -static int print_format2(struct dlm_rsb *r, struct seq_file *s) +static void print_format2(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; - int rv = 0; lock_rsb(r); list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - rv = print_format2_lock(s, lkb, r); - if (rv) + print_format2_lock(s, lkb, r); + if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - rv = print_format2_lock(s, lkb, r); - if (rv) + print_format2_lock(s, lkb, r); + if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { - rv = print_format2_lock(s, lkb, r); - if (rv) + print_format2_lock(s, lkb, r); + if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(r); - return rv; } -static int print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, +static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb, int rsb_lookup) { u64 xid = 0; - int rv; - if (lkb->lkb_flags & DLM_IFL_USER) { + if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { if (lkb->lkb_ua) xid = lkb->lkb_ua->xid; } - rv 
= seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n", - lkb->lkb_id, - lkb->lkb_nodeid, - lkb->lkb_remid, - lkb->lkb_ownpid, - (unsigned long long)xid, - lkb->lkb_exflags, - lkb->lkb_flags, - lkb->lkb_status, - lkb->lkb_grmode, - lkb->lkb_rqmode, - lkb->lkb_last_bast.mode, - rsb_lookup, - lkb->lkb_wait_type, - lkb->lkb_lvbseq, - (unsigned long long)ktime_to_ns(lkb->lkb_timestamp), - (unsigned long long)ktime_to_ns(lkb->lkb_last_bast_time)); - return rv; + seq_printf(s, "lkb %x %d %x %u %llu %x %x %d %d %d %d %d %d %u %llu %llu\n", + lkb->lkb_id, + lkb->lkb_nodeid, + lkb->lkb_remid, + lkb->lkb_ownpid, + (unsigned long long)xid, + lkb->lkb_exflags, + dlm_iflags_val(lkb), + lkb->lkb_status, + lkb->lkb_grmode, + lkb->lkb_rqmode, + lkb->lkb_last_bast_cb_mode, + rsb_lookup, + lkb->lkb_wait_type, + lkb->lkb_lvbseq, + (unsigned long long)ktime_to_ns(lkb->lkb_timestamp), + (unsigned long long)ktime_to_ns(lkb->lkb_last_bast_time)); } -static int print_format3(struct dlm_rsb *r, struct seq_file *s) +static void print_format3(struct dlm_rsb *r, struct seq_file *s) { struct dlm_lkb *lkb; int i, lvblen = r->res_ls->ls_lvblen; int print_name = 1; - int rv; lock_rsb(r); - rv = seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ", - r, - r->res_nodeid, - r->res_first_lkid, - r->res_flags, - !list_empty(&r->res_root_list), - !list_empty(&r->res_recover_list), - r->res_recover_locks_count, - r->res_length); - if (rv) + seq_printf(s, "rsb %p %d %x %lx %d %d %u %d ", + r, + r->res_nodeid, + r->res_first_lkid, + r->res_flags, + !list_empty(&r->res_root_list), + !list_empty(&r->res_recover_list), + r->res_recover_locks_count, + r->res_length); + if (seq_has_overflowed(s)) goto out; for (i = 0; i < r->res_length; i++) { @@ -292,7 +280,7 @@ static int print_format3(struct dlm_rsb *r, struct seq_file *s) print_name = 0; } - seq_printf(s, "%s", print_name ? "str " : "hex"); + seq_puts(s, print_name ? 
"str " : "hex"); for (i = 0; i < r->res_length; i++) { if (print_name) @@ -300,8 +288,8 @@ static int print_format3(struct dlm_rsb *r, struct seq_file *s) else seq_printf(s, " %02x", (unsigned char)r->res_name[i]); } - rv = seq_printf(s, "\n"); - if (rv) + seq_putc(s, '\n'); + if (seq_has_overflowed(s)) goto out; if (!r->res_lvbptr) @@ -311,65 +299,62 @@ static int print_format3(struct dlm_rsb *r, struct seq_file *s) for (i = 0; i < lvblen; i++) seq_printf(s, " %02x", (unsigned char)r->res_lvbptr[i]); - rv = seq_printf(s, "\n"); - if (rv) + seq_putc(s, '\n'); + if (seq_has_overflowed(s)) goto out; do_locks: list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - rv = print_format3_lock(s, lkb, 0); - if (rv) + print_format3_lock(s, lkb, 0); + if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - rv = print_format3_lock(s, lkb, 0); - if (rv) + print_format3_lock(s, lkb, 0); + if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue) { - rv = print_format3_lock(s, lkb, 0); - if (rv) + print_format3_lock(s, lkb, 0); + if (seq_has_overflowed(s)) goto out; } list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup) { - rv = print_format3_lock(s, lkb, 1); - if (rv) + print_format3_lock(s, lkb, 1); + if (seq_has_overflowed(s)) goto out; } out: unlock_rsb(r); - return rv; } -static int print_format4(struct dlm_rsb *r, struct seq_file *s) +static void print_format4(struct dlm_rsb *r, struct seq_file *s) { int our_nodeid = dlm_our_nodeid(); int print_name = 1; - int i, rv; + int i; lock_rsb(r); - rv = seq_printf(s, "rsb %p %d %d %d %d %lu %lx %d ", - r, - r->res_nodeid, - r->res_master_nodeid, - r->res_dir_nodeid, - our_nodeid, - r->res_toss_time, - r->res_flags, - r->res_length); - if (rv) - goto out; + seq_printf(s, "rsb %p %d %d %d %d %lu %lx %d ", + r, + r->res_nodeid, + r->res_master_nodeid, + r->res_dir_nodeid, + our_nodeid, + r->res_toss_time, + r->res_flags, + r->res_length); for (i = 0; i < r->res_length; i++) { if (!isascii(r->res_name[i]) || !isprint(r->res_name[i])) print_name = 0; } - seq_printf(s, "%s", print_name ? "str " : "hex"); + seq_puts(s, print_name ? "str " : "hex"); for (i = 0; i < r->res_length; i++) { if (print_name) @@ -377,220 +362,79 @@ static int print_format4(struct dlm_rsb *r, struct seq_file *s) else seq_printf(s, " %02x", (unsigned char)r->res_name[i]); } - rv = seq_printf(s, "\n"); - out: + seq_putc(s, '\n'); unlock_rsb(r); - return rv; } -struct rsbtbl_iter { - struct dlm_rsb *rsb; - unsigned bucket; - int format; - int header; -}; +static const struct seq_operations format1_seq_ops; +static const struct seq_operations format2_seq_ops; +static const struct seq_operations format3_seq_ops; +static const struct seq_operations format4_seq_ops; -/* seq_printf returns -1 if the buffer is full, and 0 otherwise. - If the buffer is full, seq_printf can be called again, but it - does nothing and just returns -1. So, the these printing routines - periodically check the return value to avoid wasting too much time - trying to print to a full buffer. */ +/* + * If the buffer is full, seq_printf can be called again, but it + * does nothing. So, the these printing routines periodically check + * seq_has_overflowed to avoid wasting too much time trying to print to + * a full buffer. 
+ */ static int table_seq_show(struct seq_file *seq, void *iter_ptr) { - struct rsbtbl_iter *ri = iter_ptr; - int rv = 0; - - switch (ri->format) { - case 1: - rv = print_format1(ri->rsb, seq); - break; - case 2: - if (ri->header) { - seq_printf(seq, "id nodeid remid pid xid exflags " - "flags sts grmode rqmode time_ms " - "r_nodeid r_len r_name\n"); - ri->header = 0; - } - rv = print_format2(ri->rsb, seq); - break; - case 3: - if (ri->header) { - seq_printf(seq, "version rsb 1.1 lvb 1.1 lkb 1.1\n"); - ri->header = 0; - } - rv = print_format3(ri->rsb, seq); - break; - case 4: - if (ri->header) { - seq_printf(seq, "version 4 rsb 2\n"); - ri->header = 0; - } - rv = print_format4(ri->rsb, seq); - break; - } + struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_slow_list); - return rv; -} + if (seq->op == &format1_seq_ops) + print_format1(rsb, seq); + else if (seq->op == &format2_seq_ops) + print_format2(rsb, seq); + else if (seq->op == &format3_seq_ops) + print_format3(rsb, seq); + else if (seq->op == &format4_seq_ops) + print_format4(rsb, seq); -static const struct seq_operations format1_seq_ops; -static const struct seq_operations format2_seq_ops; -static const struct seq_operations format3_seq_ops; -static const struct seq_operations format4_seq_ops; + return 0; +} static void *table_seq_start(struct seq_file *seq, loff_t *pos) { - struct rb_root *tree; - struct rb_node *node; struct dlm_ls *ls = seq->private; - struct rsbtbl_iter *ri; - struct dlm_rsb *r; - loff_t n = *pos; - unsigned bucket, entry; - int toss = (seq->op == &format4_seq_ops); - - bucket = n >> 32; - entry = n & ((1LL << 32) - 1); - - if (bucket >= ls->ls_rsbtbl_size) - return NULL; - - ri = kzalloc(sizeof(struct rsbtbl_iter), GFP_NOFS); - if (!ri) - return NULL; - if (n == 0) - ri->header = 1; - if (seq->op == &format1_seq_ops) - ri->format = 1; - if (seq->op == &format2_seq_ops) - ri->format = 2; - if (seq->op == &format3_seq_ops) - ri->format = 3; - if (seq->op == &format4_seq_ops) - ri->format = 4; - - tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - if (!RB_EMPTY_ROOT(tree)) { - for (node = rb_first(tree); node; node = rb_next(node)) { - r = rb_entry(node, struct dlm_rsb, res_hashnode); - if (!entry--) { - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - return ri; - } - } + struct list_head *list; + + if (!*pos) { + if (seq->op == &format2_seq_ops) + seq_puts(seq, "id nodeid remid pid xid exflags flags sts grmode rqmode time_ms r_nodeid r_len r_name\n"); + else if (seq->op == &format3_seq_ops) + seq_puts(seq, "rsb ptr nodeid first_lkid flags !root_list_empty !recover_list_empty recover_locks_count len\n"); + else if (seq->op == &format4_seq_ops) + seq_puts(seq, "rsb ptr nodeid master_nodeid dir_nodeid our_nodeid toss_time flags len str|hex name\n"); } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - - /* - * move to the first rsb in the next non-empty bucket - */ - /* zero the entry */ - n &= ~((1LL << 32) - 1); - - while (1) { - bucket++; - n += 1LL << 32; + if (seq->op == &format4_seq_ops) + list = &ls->ls_slow_inactive; + else + list = &ls->ls_slow_active; - if (bucket >= ls->ls_rsbtbl_size) { - kfree(ri); - return NULL; - } - tree = toss ? 
&ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - if (!RB_EMPTY_ROOT(tree)) { - node = rb_first(tree); - r = rb_entry(node, struct dlm_rsb, res_hashnode); - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - *pos = n; - return ri; - } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - } + read_lock_bh(&ls->ls_rsbtbl_lock); + return seq_list_start(list, *pos); } static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos) { struct dlm_ls *ls = seq->private; - struct rsbtbl_iter *ri = iter_ptr; - struct rb_root *tree; - struct rb_node *next; - struct dlm_rsb *r, *rp; - loff_t n = *pos; - unsigned bucket; - int toss = (seq->op == &format4_seq_ops); - - bucket = n >> 32; - - /* - * move to the next rsb in the same bucket - */ - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - rp = ri->rsb; - next = rb_next(&rp->res_hashnode); - - if (next) { - r = rb_entry(next, struct dlm_rsb, res_hashnode); - dlm_hold_rsb(r); - ri->rsb = r; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - dlm_put_rsb(rp); - ++*pos; - return ri; - } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - dlm_put_rsb(rp); + struct list_head *list; - /* - * move to the first rsb in the next non-empty bucket - */ - - /* zero the entry */ - n &= ~((1LL << 32) - 1); - - while (1) { - bucket++; - n += 1LL << 32; + if (seq->op == &format4_seq_ops) + list = &ls->ls_slow_inactive; + else + list = &ls->ls_slow_active; - if (bucket >= ls->ls_rsbtbl_size) { - kfree(ri); - return NULL; - } - tree = toss ? &ls->ls_rsbtbl[bucket].toss : &ls->ls_rsbtbl[bucket].keep; - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - if (!RB_EMPTY_ROOT(tree)) { - next = rb_first(tree); - r = rb_entry(next, struct dlm_rsb, res_hashnode); - dlm_hold_rsb(r); - ri->rsb = r; - ri->bucket = bucket; - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - *pos = n; - return ri; - } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - } + return seq_list_next(iter_ptr, list, pos); } static void table_seq_stop(struct seq_file *seq, void *iter_ptr) { - struct rsbtbl_iter *ri = iter_ptr; + struct dlm_ls *ls = seq->private; - if (ri) { - dlm_put_rsb(ri->rsb); - kfree(ri); - } + read_unlock_bh(&ls->ls_rsbtbl_lock); } static const struct seq_operations format1_seq_ops = { @@ -626,20 +470,83 @@ static const struct file_operations format2_fops; static const struct file_operations format3_fops; static const struct file_operations format4_fops; -static int table_open(struct inode *inode, struct file *file) +static int table_open1(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &format1_seq_ops); + if (ret) + return ret; + + seq = file->private_data; + seq->private = inode->i_private; /* the dlm_ls */ + return 0; +} + +static int table_open2(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &format2_seq_ops); + if (ret) + return ret; + + seq = file->private_data; + seq->private = inode->i_private; /* the dlm_ls */ + return 0; +} + +static ssize_t table_write2(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq = file->private_data; + int n, len, lkb_nodeid, lkb_status, error; + char name[DLM_RESNAME_MAXLEN + 1] = {}; + struct dlm_ls *ls = seq->private; + unsigned int lkb_flags; + char buf[256] = {}; + uint32_t lkb_id; + + if (copy_from_user(buf, user_buf, + min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + n = sscanf(buf, "%x %" 
__stringify(DLM_RESNAME_MAXLEN) "s %x %d %d", + &lkb_id, name, &lkb_flags, &lkb_nodeid, &lkb_status); + if (n != 5) + return -EINVAL; + + len = strnlen(name, DLM_RESNAME_MAXLEN); + error = dlm_debug_add_lkb(ls, lkb_id, name, len, lkb_flags, + lkb_nodeid, lkb_status); + if (error) + return error; + + return count; +} + +static int table_open3(struct inode *inode, struct file *file) { struct seq_file *seq; - int ret = -1; + int ret; - if (file->f_op == &format1_fops) - ret = seq_open(file, &format1_seq_ops); - else if (file->f_op == &format2_fops) - ret = seq_open(file, &format2_seq_ops); - else if (file->f_op == &format3_fops) - ret = seq_open(file, &format3_seq_ops); - else if (file->f_op == &format4_fops) - ret = seq_open(file, &format4_seq_ops); + ret = seq_open(file, &format3_seq_ops); + if (ret) + return ret; + seq = file->private_data; + seq->private = inode->i_private; /* the dlm_ls */ + return 0; +} + +static int table_open4(struct inode *inode, struct file *file) +{ + struct seq_file *seq; + int ret; + + ret = seq_open(file, &format4_seq_ops); if (ret) return ret; @@ -650,7 +557,7 @@ static int table_open(struct inode *inode, struct file *file) static const struct file_operations format1_fops = { .owner = THIS_MODULE, - .open = table_open, + .open = table_open1, .read = seq_read, .llseek = seq_lseek, .release = seq_release @@ -658,15 +565,16 @@ static const struct file_operations format1_fops = { static const struct file_operations format2_fops = { .owner = THIS_MODULE, - .open = table_open, + .open = table_open2, .read = seq_read, + .write = table_write2, .llseek = seq_lseek, .release = seq_release }; static const struct file_operations format3_fops = { .owner = THIS_MODULE, - .open = table_open, + .open = table_open3, .read = seq_read, .llseek = seq_lseek, .release = seq_release @@ -674,7 +582,7 @@ static const struct file_operations format3_fops = { static const struct file_operations format4_fops = { .owner = THIS_MODULE, - .open = table_open, + .open = table_open4, .read = seq_read, .llseek = seq_lseek, .release = seq_release @@ -691,7 +599,13 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, size_t len = DLM_DEBUG_BUF_LEN, pos = 0, ret, rv; mutex_lock(&debug_buf_lock); - mutex_lock(&ls->ls_waiters_mutex); + ret = dlm_lock_recovery_try(ls); + if (!ret) { + rv = -EAGAIN; + goto out; + } + + spin_lock_bh(&ls->ls_waiters_lock); memset(debug_buf, 0, sizeof(debug_buf)); list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { @@ -702,37 +616,153 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf, break; pos += ret; } - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock_bh(&ls->ls_waiters_lock); + dlm_unlock_recovery(ls); rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos); +out: mutex_unlock(&debug_buf_lock); return rv; } +static ssize_t waiters_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dlm_ls *ls = file->private_data; + int mstype, to_nodeid; + char buf[128] = {}; + uint32_t lkb_id; + int n, error; + + if (copy_from_user(buf, user_buf, + min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + n = sscanf(buf, "%x %d %d", &lkb_id, &mstype, &to_nodeid); + if (n != 3) + return -EINVAL; + + error = dlm_lock_recovery_try(ls); + if (!error) + return -EAGAIN; + + error = dlm_debug_add_lkb_to_waiters(ls, lkb_id, mstype, to_nodeid); + dlm_unlock_recovery(ls); + if (error) + return error; + + return count; +} + static const struct file_operations waiters_fops = { .owner = 
THIS_MODULE, .open = simple_open, .read = waiters_read, + .write = waiters_write, .llseek = default_llseek, }; void dlm_delete_debug_file(struct dlm_ls *ls) { - if (ls->ls_debug_rsb_dentry) - debugfs_remove(ls->ls_debug_rsb_dentry); - if (ls->ls_debug_waiters_dentry) - debugfs_remove(ls->ls_debug_waiters_dentry); - if (ls->ls_debug_locks_dentry) - debugfs_remove(ls->ls_debug_locks_dentry); - if (ls->ls_debug_all_dentry) - debugfs_remove(ls->ls_debug_all_dentry); - if (ls->ls_debug_toss_dentry) - debugfs_remove(ls->ls_debug_toss_dentry); + debugfs_remove(ls->ls_debug_rsb_dentry); + debugfs_remove(ls->ls_debug_waiters_dentry); + debugfs_remove(ls->ls_debug_locks_dentry); + debugfs_remove(ls->ls_debug_all_dentry); + debugfs_remove(ls->ls_debug_toss_dentry); + debugfs_remove(ls->ls_debug_queued_asts_dentry); +} + +static int dlm_state_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "%s\n", dlm_midcomms_state(file->private)); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dlm_state); + +static int dlm_flags_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "%lu\n", dlm_midcomms_flags(file->private)); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dlm_flags); + +static int dlm_send_queue_cnt_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "%d\n", dlm_midcomms_send_queue_cnt(file->private)); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(dlm_send_queue_cnt); + +static int dlm_version_show(struct seq_file *file, void *offset) +{ + seq_printf(file, "0x%08x\n", dlm_midcomms_version(file->private)); + return 0; } +DEFINE_SHOW_ATTRIBUTE(dlm_version); -int dlm_create_debug_file(struct dlm_ls *ls) +static ssize_t dlm_rawmsg_write(struct file *fp, const char __user *user_buf, + size_t count, loff_t *ppos) { - char name[DLM_LOCKSPACE_LEN+8]; + void *buf; + int ret; + + if (count > PAGE_SIZE || count < sizeof(struct dlm_header)) + return -EINVAL; + + buf = kmalloc(PAGE_SIZE, GFP_NOFS); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, user_buf, count)) { + ret = -EFAULT; + goto out; + } + + ret = dlm_midcomms_rawmsg_send(fp->private_data, buf, count); + if (ret) + goto out; + + kfree(buf); + return count; + +out: + kfree(buf); + return ret; +} + +static const struct file_operations dlm_rawmsg_fops = { + .open = simple_open, + .write = dlm_rawmsg_write, +}; + +void *dlm_create_debug_comms_file(int nodeid, void *data) +{ + struct dentry *d_node; + char name[256]; + + memset(name, 0, sizeof(name)); + snprintf(name, 256, "%d", nodeid); + + d_node = debugfs_create_dir(name, dlm_comms); + debugfs_create_file("state", 0444, d_node, data, &dlm_state_fops); + debugfs_create_file("flags", 0444, d_node, data, &dlm_flags_fops); + debugfs_create_file("send_queue_count", 0444, d_node, data, + &dlm_send_queue_cnt_fops); + debugfs_create_file("version", 0444, d_node, data, &dlm_version_fops); + debugfs_create_file("rawmsg", 0200, d_node, data, &dlm_rawmsg_fops); + + return d_node; +} + +void dlm_delete_debug_comms_file(void *ctx) +{ + debugfs_remove(ctx); +} + +void dlm_create_debug_file(struct dlm_ls *ls) +{ + /* Reserve enough space for the longest file name */ + char name[DLM_LOCKSPACE_LEN + sizeof("_queued_asts")]; /* format 1 */ @@ -741,71 +771,51 @@ int dlm_create_debug_file(struct dlm_ls *ls) dlm_root, ls, &format1_fops); - if (!ls->ls_debug_rsb_dentry) - goto fail; /* format 2 */ - memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_locks", ls->ls_name); + snprintf(name, sizeof(name), "%s_locks", ls->ls_name); ls->ls_debug_locks_dentry = debugfs_create_file(name, - 
S_IFREG | S_IRUGO, + 0644, dlm_root, ls, &format2_fops); - if (!ls->ls_debug_locks_dentry) - goto fail; /* format 3 */ - memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_all", ls->ls_name); + snprintf(name, sizeof(name), "%s_all", ls->ls_name); ls->ls_debug_all_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, dlm_root, ls, &format3_fops); - if (!ls->ls_debug_all_dentry) - goto fail; /* format 4 */ - memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_toss", ls->ls_name); + snprintf(name, sizeof(name), "%s_toss", ls->ls_name); ls->ls_debug_toss_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, dlm_root, ls, &format4_fops); - if (!ls->ls_debug_toss_dentry) - goto fail; - memset(name, 0, sizeof(name)); - snprintf(name, DLM_LOCKSPACE_LEN+8, "%s_waiters", ls->ls_name); + snprintf(name, sizeof(name), "%s_waiters", ls->ls_name); ls->ls_debug_waiters_dentry = debugfs_create_file(name, - S_IFREG | S_IRUGO, + 0644, dlm_root, ls, &waiters_fops); - if (!ls->ls_debug_waiters_dentry) - goto fail; - - return 0; - - fail: - dlm_delete_debug_file(ls); - return -ENOMEM; } -int __init dlm_register_debugfs(void) +void __init dlm_register_debugfs(void) { mutex_init(&debug_buf_lock); dlm_root = debugfs_create_dir("dlm", NULL); - return dlm_root ? 0 : -ENOMEM; + dlm_comms = debugfs_create_dir("comms", dlm_root); } void dlm_unregister_debugfs(void) diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c index 278a75cda446..b1ab0adbd9d0 100644 --- a/fs/dlm/dir.c +++ b/fs/dlm/dir.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
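For reference, the debug_fs.c changes above follow the standard debugfs + seq_file pattern: show routines no longer return an error on a full buffer, they simply stop early once seq_has_overflowed() reports truncation, and the seq_file core retries the read with a larger buffer. A minimal, self-contained sketch of that pattern (hypothetical demo_* names, list locking omitted; not DLM code):

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/list.h>

struct demo_entry {
	struct list_head list;
	int value;
};

static LIST_HEAD(demo_list);
static struct dentry *demo_dir;

static int demo_show(struct seq_file *s, void *unused)
{
	struct demo_entry *e;

	list_for_each_entry(e, &demo_list, list) {
		seq_printf(s, "value %d\n", e->value);
		/* stop printing into a buffer that already overflowed;
		 * the seq_file core will retry with a bigger one */
		if (seq_has_overflowed(s))
			break;
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo);

static void demo_debugfs_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("entries", 0444, demo_dir, NULL, &demo_fops);
}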
** ******************************************************************************* ******************************************************************************/ @@ -49,18 +47,16 @@ int dlm_dir_nodeid(struct dlm_rsb *r) return r->res_dir_nodeid; } -void dlm_recover_dir_nodeid(struct dlm_ls *ls) +void dlm_recover_dir_nodeid(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { r->res_dir_nodeid = dlm_hash2nodeid(ls, r->res_hash); } - up_read(&ls->ls_root_sem); } -int dlm_recover_directory(struct dlm_ls *ls) +int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; char *b, *last_name = NULL; @@ -68,7 +64,7 @@ int dlm_recover_directory(struct dlm_ls *ls) uint16_t namelen; unsigned int count = 0, count_match = 0, count_bad = 0, count_add = 0; - log_debug(ls, "dlm_recover_directory"); + log_rinfo(ls, "dlm_recover_directory"); if (dlm_no_directory(ls)) goto out_status; @@ -86,12 +82,13 @@ int dlm_recover_directory(struct dlm_ls *ls) for (;;) { int left; - error = dlm_recovery_stopped(ls); - if (error) + if (dlm_recovery_stopped(ls)) { + error = -EINTR; goto out_free; + } error = dlm_rcom_names(ls, memb->nodeid, - last_name, last_len); + last_name, last_len, seq); if (error) goto out_free; @@ -102,7 +99,7 @@ int dlm_recover_directory(struct dlm_ls *ls) */ b = ls->ls_recover_buf->rc_buf; - left = ls->ls_recover_buf->rc_header.h_length; + left = le16_to_cpu(ls->ls_recover_buf->rc_header.h_length); left -= sizeof(struct dlm_rcom); for (;;) { @@ -189,7 +186,7 @@ int dlm_recover_directory(struct dlm_ls *ls) error = 0; dlm_set_recover_status(ls, DLM_RS_DIR); - log_debug(ls, "dlm_recover_directory %u in %u new", + log_rinfo(ls, "dlm_recover_directory %u in %u new", count, count_add); out_free: kfree(last_name); @@ -197,70 +194,156 @@ int dlm_recover_directory(struct dlm_ls *ls) return error; } -static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len) +static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name, + int len) { struct dlm_rsb *r; - uint32_t hash, bucket; int rv; - hash = jhash(name, len, 0); - bucket = hash & (ls->ls_rsbtbl_size - 1); - - spin_lock(&ls->ls_rsbtbl[bucket].lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, &r); - if (rv) - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss, - name, len, &r); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); - + read_lock_bh(&ls->ls_rsbtbl_lock); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + read_unlock_bh(&ls->ls_rsbtbl_lock); if (!rv) return r; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, &ls->ls_masters_list, res_masters_list) { if (len == r->res_length && !memcmp(name, r->res_name, len)) { - up_read(&ls->ls_root_sem); log_debug(ls, "find_rsb_root revert to root_list %s", r->res_name); return r; } } - up_read(&ls->ls_root_sem); return NULL; } +struct dlm_dir_dump { + /* init values to match if whole + * dump fits to one seq. Sanity check only. + */ + uint64_t seq_init; + uint64_t nodeid_init; + /* compare local pointer with last lookup, + * just a sanity check. 
+ */ + struct list_head *last; + + unsigned int sent_res; /* for log info */ + unsigned int sent_msg; /* for log info */ + + struct list_head list; +}; + +static void drop_dir_ctx(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *dd, *safe; + + write_lock_bh(&ls->ls_dir_dump_lock); + list_for_each_entry_safe(dd, safe, &ls->ls_dir_dump_list, list) { + if (dd->nodeid_init == nodeid) { + log_error(ls, "drop dump seq %llu", + (unsigned long long)dd->seq_init); + list_del(&dd->list); + kfree(dd); + } + } + write_unlock_bh(&ls->ls_dir_dump_lock); +} + +static struct dlm_dir_dump *lookup_dir_dump(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *iter, *dd = NULL; + + read_lock_bh(&ls->ls_dir_dump_lock); + list_for_each_entry(iter, &ls->ls_dir_dump_list, list) { + if (iter->nodeid_init == nodeid) { + dd = iter; + break; + } + } + read_unlock_bh(&ls->ls_dir_dump_lock); + + return dd; +} + +static struct dlm_dir_dump *init_dir_dump(struct dlm_ls *ls, int nodeid) +{ + struct dlm_dir_dump *dd; + + dd = lookup_dir_dump(ls, nodeid); + if (dd) { + log_error(ls, "found ongoing dir dump for node %d, will drop it", + nodeid); + drop_dir_ctx(ls, nodeid); + } + + dd = kzalloc(sizeof(*dd), GFP_ATOMIC); + if (!dd) + return NULL; + + dd->seq_init = ls->ls_recover_seq; + dd->nodeid_init = nodeid; + + write_lock_bh(&ls->ls_dir_dump_lock); + list_add(&dd->list, &ls->ls_dir_dump_list); + write_unlock_bh(&ls->ls_dir_dump_lock); + + return dd; +} + /* Find the rsb where we left off (or start again), then send rsb names for rsb's we're master of and whose directory node matches the requesting node. inbuf is the rsb name last sent, inlen is the name's length */ -void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, +void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, char *outbuf, int outlen, int nodeid) { struct list_head *list; struct dlm_rsb *r; int offset = 0, dir_nodeid; + struct dlm_dir_dump *dd; __be16 be_namelen; - down_read(&ls->ls_root_sem); + read_lock_bh(&ls->ls_masters_lock); if (inlen > 1) { + dd = lookup_dir_dump(ls, nodeid); + if (!dd) { + log_error(ls, "failed to lookup dir dump context nodeid: %d", + nodeid); + goto out; + } + + /* next chunk in dump */ r = find_rsb_root(ls, inbuf, inlen); if (!r) { - inbuf[inlen - 1] = '\0'; - log_error(ls, "copy_master_names from %d start %d %s", - nodeid, inlen, inbuf); + log_error(ls, "copy_master_names from %d start %d %.*s", + nodeid, inlen, inlen, inbuf); + goto out; + } + list = r->res_masters_list.next; + + /* sanity checks */ + if (dd->last != &r->res_masters_list || + dd->seq_init != ls->ls_recover_seq) { + log_error(ls, "failed dir dump sanity check seq_init: %llu seq: %llu", + (unsigned long long)dd->seq_init, + (unsigned long long)ls->ls_recover_seq); goto out; } - list = r->res_root_list.next; } else { - list = ls->ls_root_list.next; - } + dd = init_dir_dump(ls, nodeid); + if (!dd) { + log_error(ls, "failed to allocate dir dump context"); + goto out; + } - for (offset = 0; list != &ls->ls_root_list; list = list->next) { - r = list_entry(list, struct dlm_rsb, res_root_list); - if (r->res_nodeid) - continue; + /* start dump */ + list = ls->ls_masters_list.next; + dd->last = list; + } + for (offset = 0; list != &ls->ls_masters_list; list = list->next) { + r = list_entry(list, struct dlm_rsb, res_masters_list); dir_nodeid = dlm_dir_nodeid(r); if (dir_nodeid != nodeid) continue; @@ -278,7 +361,7 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, be_namelen = cpu_to_be16(0); 
memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); - ls->ls_recover_dir_sent_msg++; + dd->sent_msg++; goto out; } @@ -287,7 +370,8 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, offset += sizeof(__be16); memcpy(outbuf + offset, r->res_name, r->res_length); offset += r->res_length; - ls->ls_recover_dir_sent_res++; + dd->sent_res++; + dd->last = list; } /* @@ -295,14 +379,22 @@ void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, * terminating record. */ - if ((list == &ls->ls_root_list) && + if ((list == &ls->ls_masters_list) && (offset + sizeof(uint16_t) <= outlen)) { + /* end dump */ be_namelen = cpu_to_be16(0xFFFF); memcpy(outbuf + offset, &be_namelen, sizeof(__be16)); offset += sizeof(__be16); - ls->ls_recover_dir_sent_msg++; + dd->sent_msg++; + log_rinfo(ls, "dlm_recover_directory nodeid %d sent %u res out %u messages", + nodeid, dd->sent_res, dd->sent_msg); + + write_lock_bh(&ls->ls_dir_dump_lock); + list_del_init(&dd->list); + write_unlock_bh(&ls->ls_dir_dump_lock); + kfree(dd); } out: - up_read(&ls->ls_root_sem); + read_unlock_bh(&ls->ls_masters_lock); } diff --git a/fs/dlm/dir.h b/fs/dlm/dir.h index 417506344456..5b2a7ee3762d 100644 --- a/fs/dlm/dir.h +++ b/fs/dlm/dir.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -16,10 +14,11 @@ int dlm_dir_nodeid(struct dlm_rsb *rsb); int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash); -void dlm_recover_dir_nodeid(struct dlm_ls *ls); -int dlm_recover_directory(struct dlm_ls *ls); -void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen, - char *outbuf, int outlen, int nodeid); +void dlm_recover_dir_nodeid(struct dlm_ls *ls, + const struct list_head *root_list); +int dlm_recover_directory(struct dlm_ls *ls, uint64_t seq); +void dlm_copy_master_names(struct dlm_ls *ls, const char *inbuf, int inlen, + char *outbuf, int outlen, int nodeid); #endif /* __DIR_DOT_H__ */ diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h index e7665c31f7b1..d534a4bc162b 100644 --- a/fs/dlm/dlm_internal.h +++ b/fs/dlm/dlm_internal.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
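The dump that dlm_copy_master_names() emits above is a simple length-prefixed stream: each record is a big-endian 16-bit name length followed by the name bytes, a zero length marks the end of a chunk (the requester resumes with the last name it received), and 0xFFFF terminates the whole dump. A hedged sketch of a conforming reader (hypothetical names; not the actual dlm_recover_directory() parser):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

/* returns 1 at end of dump, 0 when another chunk must be requested,
 * -EBADMSG on a truncated record */
static int demo_parse_name_records(const char *buf, int len)
{
	__be16 be_namelen;
	uint16_t namelen;
	int offset = 0;

	while (offset + sizeof(__be16) <= len) {
		memcpy(&be_namelen, buf + offset, sizeof(__be16));
		offset += sizeof(__be16);
		namelen = be16_to_cpu(be_namelen);

		if (namelen == 0xFFFF)	/* terminating record */
			return 1;
		if (namelen == 0)	/* chunk full, resume later */
			return 0;
		if (offset + namelen > len)
			return -EBADMSG;

		/* name is at buf + offset, namelen bytes long */
		offset += namelen;
	}
	return 0;
}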
** ******************************************************************************* ******************************************************************************/ @@ -18,7 +16,7 @@ * This is the main header file to be included in each DLM source file. */ -#include <linux/module.h> +#include <uapi/linux/dlm_device.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/types.h> @@ -36,20 +34,15 @@ #include <linux/kernel.h> #include <linux/jhash.h> #include <linux/miscdevice.h> +#include <linux/rhashtable.h> #include <linux/mutex.h> -#include <linux/idr.h> +#include <linux/xarray.h> #include <linux/ratelimit.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include <linux/dlm.h> #include "config.h" -/* Size of the temp buffer midcomms allocates on the stack. - We try to make this large enough so most messages fit. - FIXME: should sctp make this unnecessary? */ - -#define DLM_INBUF_LEN 148 - struct dlm_ls; struct dlm_lkb; struct dlm_rsb; @@ -60,12 +53,25 @@ struct dlm_header; struct dlm_message; struct dlm_rcom; struct dlm_mhandle; +struct dlm_msg; #define log_print(fmt, args...) \ printk(KERN_ERR "dlm: "fmt"\n" , ##args) +#define log_print_ratelimited(fmt, args...) \ + printk_ratelimited(KERN_ERR "dlm: "fmt"\n", ##args) #define log_error(ls, fmt, args...) \ printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args) +#define log_rinfo(ls, fmt, args...) \ +do { \ + if (dlm_config.ci_log_info) \ + printk(KERN_INFO "dlm: %s: " fmt "\n", \ + (ls)->ls_name, ##args); \ + else if (dlm_config.ci_log_debug) \ + printk(KERN_DEBUG "dlm: %s: " fmt "\n", \ + (ls)->ls_name , ##args); \ +} while (0) + #define log_debug(ls, fmt, args...) \ do { \ if (dlm_config.ci_log_debug) \ @@ -90,22 +96,10 @@ do { \ __LINE__, __FILE__, #x, jiffies); \ {do} \ printk("\n"); \ - BUG(); \ panic("DLM: Record message above and reboot.\n"); \ } \ } - -#define DLM_RTF_SHRINK 0x00000001 - -struct dlm_rsbtable { - struct rb_root keep; - struct rb_root toss; - spinlock_t lock; - uint32_t flags; -}; - - /* * Lockspace member (per node in a ls) */ @@ -142,7 +136,6 @@ struct dlm_args { void (*bastfn) (void *astparam, int mode); int mode; struct dlm_lksb *lksb; - unsigned long timeout; }; @@ -192,33 +185,69 @@ struct dlm_args { #define DLM_LKSTS_GRANTED 2 #define DLM_LKSTS_CONVERT 3 -/* lkb_flags */ +/* lkb_iflags */ + +#define DLM_IFL_MSTCPY_BIT 16 +#define __DLM_IFL_MIN_BIT DLM_IFL_MSTCPY_BIT +#define DLM_IFL_RESEND_BIT 17 +#define DLM_IFL_DEAD_BIT 18 +#define DLM_IFL_OVERLAP_UNLOCK_BIT 19 +#define DLM_IFL_OVERLAP_CANCEL_BIT 20 +#define DLM_IFL_ENDOFLIFE_BIT 21 +#define DLM_IFL_DEADLOCK_CANCEL_BIT 24 +#define __DLM_IFL_MAX_BIT DLM_IFL_DEADLOCK_CANCEL_BIT -#define DLM_IFL_MSTCPY 0x00010000 -#define DLM_IFL_RESEND 0x00020000 -#define DLM_IFL_DEAD 0x00040000 -#define DLM_IFL_OVERLAP_UNLOCK 0x00080000 -#define DLM_IFL_OVERLAP_CANCEL 0x00100000 -#define DLM_IFL_ENDOFLIFE 0x00200000 -#define DLM_IFL_WATCH_TIMEWARN 0x00400000 -#define DLM_IFL_TIMEOUT_CANCEL 0x00800000 -#define DLM_IFL_DEADLOCK_CANCEL 0x01000000 -#define DLM_IFL_STUB_MS 0x02000000 /* magic number for m_flags */ -#define DLM_IFL_USER 0x00000001 -#define DLM_IFL_ORPHAN 0x00000002 +/* lkb_dflags */ -#define DLM_CALLBACKS_SIZE 6 +#define DLM_DFL_USER_BIT 0 +#define __DLM_DFL_MIN_BIT DLM_DFL_USER_BIT +#define DLM_DFL_ORPHAN_BIT 1 +#define __DLM_DFL_MAX_BIT DLM_DFL_ORPHAN_BIT #define DLM_CB_CAST 0x00000001 #define DLM_CB_BAST 0x00000002 -#define DLM_CB_SKIP 0x00000004 + +/* much of this is just saving user space pointers associated with the + * lock that we 
pass back to the user lib with an ast + */ + +struct dlm_user_args { + struct dlm_user_proc *proc; /* each process that opens the lockspace + * device has private data + * (dlm_user_proc) on the struct file, + * the process's locks point back to it + */ + struct dlm_lksb lksb; + struct dlm_lksb __user *user_lksb; + void __user *castparam; + void __user *castaddr; + void __user *bastparam; + void __user *bastaddr; + uint64_t xid; +}; struct dlm_callback { - uint64_t seq; uint32_t flags; /* DLM_CBF_ */ int sb_status; /* copy to lksb status */ uint8_t sb_flags; /* copy to lksb flags */ int8_t mode; /* rq mode of bast, gr mode of cast */ + bool copy_lvb; + struct dlm_lksb *lkb_lksb; + unsigned char lvbptr[DLM_USER_LVB_LEN]; + + union { + void *astparam; /* caller's ast arg */ + struct dlm_user_args ua; + }; + struct work_struct work; + void (*bastfn)(void *astparam, int mode); + void (*astfn)(void *astparam); + char res_name[DLM_RESNAME_MAXLEN]; + size_t res_length; + uint32_t ls_id; + uint32_t lkb_id; + + struct list_head list; }; struct dlm_lkb { @@ -229,8 +258,9 @@ struct dlm_lkb { uint32_t lkb_id; /* our lock ID */ uint32_t lkb_remid; /* lock ID on remote partner */ uint32_t lkb_exflags; /* external flags from caller */ - uint32_t lkb_sbflags; /* lksb flags */ - uint32_t lkb_flags; /* internal flags */ + unsigned long lkb_sbflags; /* lksb flags */ + unsigned long lkb_dflags; /* distributed flags */ + unsigned long lkb_iflags; /* internal flags */ uint32_t lkb_lvbseq; /* lvb sequence number */ int8_t lkb_status; /* granted, waiting, convert */ @@ -246,17 +276,12 @@ struct dlm_lkb { struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */ struct list_head lkb_wait_reply; /* waiting for remote reply */ struct list_head lkb_ownqueue; /* list of locks for a process */ - struct list_head lkb_time_list; ktime_t lkb_timestamp; - ktime_t lkb_wait_time; - unsigned long lkb_timeout_cs; - - struct mutex lkb_cb_mutex; - struct work_struct lkb_cb_work; - struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */ - struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE]; - struct dlm_callback lkb_last_cast; - struct dlm_callback lkb_last_bast; + + int8_t lkb_last_cast_cb_mode; + int8_t lkb_last_bast_cb_mode; + int8_t lkb_last_cb_mode; + uint8_t lkb_last_cb_flags; ktime_t lkb_last_cast_time; /* for debugging */ ktime_t lkb_last_bast_time; /* for debugging */ @@ -270,6 +295,7 @@ struct dlm_lkb { void *lkb_astparam; /* caller's ast arg */ struct dlm_user_args *lkb_ua; }; + struct rcu_head rcu; }; /* @@ -285,30 +311,30 @@ struct dlm_lkb { struct dlm_rsb { struct dlm_ls *res_ls; /* the lockspace */ struct kref res_ref; - struct mutex res_mutex; + spinlock_t res_lock; unsigned long res_flags; int res_length; /* length of rsb name */ int res_nodeid; int res_master_nodeid; int res_dir_nodeid; - int res_id; /* for ls_recover_idr */ + unsigned long res_id; /* for ls_recover_xa */ uint32_t res_lvbseq; uint32_t res_hash; - uint32_t res_bucket; /* rsbtbl */ unsigned long res_toss_time; uint32_t res_first_lkid; struct list_head res_lookup; /* lkbs waiting on first */ - union { - struct list_head res_hashchain; - struct rb_node res_hashnode; /* rsbtbl */ - }; + struct rhash_head res_node; /* rsbtbl */ struct list_head res_grantqueue; struct list_head res_convertqueue; struct list_head res_waitqueue; + struct list_head res_slow_list; /* ls_slow_* */ + struct list_head res_scan_list; struct list_head res_root_list; /* used for recovery */ + struct list_head res_masters_list; /* used for recovery */ struct 
list_head res_recover_list; /* used for recovery */ int res_recover_locks_count; + struct rcu_head rcu; char *res_lvbptr; char res_name[DLM_RESNAME_MAXLEN+1]; @@ -341,6 +367,8 @@ enum rsb_flags { RSB_RECOVER_CONVERT, RSB_RECOVER_GRANT, RSB_RECOVER_LVB_INVAL, + RSB_INACTIVE, + RSB_HASHED, /* set while rsb is on ls_rsbtbl */ }; static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag) @@ -362,23 +390,33 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag) /* dlm_header is first element of all structs sent between nodes */ #define DLM_HEADER_MAJOR 0x00030000 -#define DLM_HEADER_MINOR 0x00000001 +#define DLM_HEADER_MINOR 0x00000002 + +#define DLM_VERSION_3_1 0x00030001 +#define DLM_VERSION_3_2 0x00030002 #define DLM_HEADER_SLOTS 0x00000001 #define DLM_MSG 1 #define DLM_RCOM 2 +#define DLM_OPTS 3 +#define DLM_ACK 4 +#define DLM_FIN 5 struct dlm_header { - uint32_t h_version; - uint32_t h_lockspace; - uint32_t h_nodeid; /* nodeid of sender */ - uint16_t h_length; + __le32 h_version; + union { + /* for DLM_MSG and DLM_RCOM */ + __le32 h_lockspace; + /* for DLM_ACK and DLM_OPTS */ + __le32 h_seq; + } u; + __le32 h_nodeid; /* nodeid of sender */ + __le16 h_length; uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */ uint8_t h_pad; }; - #define DLM_MSG_REQUEST 1 #define DLM_MSG_CONVERT 2 #define DLM_MSG_UNLOCK 3 @@ -396,25 +434,25 @@ struct dlm_header { struct dlm_message { struct dlm_header m_header; - uint32_t m_type; /* DLM_MSG_ */ - uint32_t m_nodeid; - uint32_t m_pid; - uint32_t m_lkid; /* lkid on sender */ - uint32_t m_remid; /* lkid on receiver */ - uint32_t m_parent_lkid; - uint32_t m_parent_remid; - uint32_t m_exflags; - uint32_t m_sbflags; - uint32_t m_flags; - uint32_t m_lvbseq; - uint32_t m_hash; - int m_status; - int m_grmode; - int m_rqmode; - int m_bastmode; - int m_asts; - int m_result; /* 0 or -EXXX */ - char m_extra[0]; /* name or lvb */ + __le32 m_type; /* DLM_MSG_ */ + __le32 m_nodeid; + __le32 m_pid; + __le32 m_lkid; /* lkid on sender */ + __le32 m_remid; /* lkid on receiver */ + __le32 m_parent_lkid; + __le32 m_parent_remid; + __le32 m_exflags; + __le32 m_sbflags; + __le32 m_flags; + __le32 m_lvbseq; + __le32 m_hash; + __le32 m_status; + __le32 m_grmode; + __le32 m_rqmode; + __le32 m_bastmode; + __le32 m_asts; + __le32 m_result; /* 0 or -EXXX */ + char m_extra[]; /* name or lvb */ }; @@ -438,18 +476,37 @@ struct dlm_message { struct dlm_rcom { struct dlm_header rc_header; - uint32_t rc_type; /* DLM_RCOM_ */ - int rc_result; /* multi-purpose */ - uint64_t rc_id; /* match reply with request */ - uint64_t rc_seq; /* sender's ls_recover_seq */ - uint64_t rc_seq_reply; /* remote ls_recover_seq */ - char rc_buf[0]; + __le32 rc_type; /* DLM_RCOM_ */ + __le32 rc_result; /* multi-purpose */ + __le64 rc_id; /* match reply with request */ + __le64 rc_seq; /* sender's ls_recover_seq */ + __le64 rc_seq_reply; /* remote ls_recover_seq */ + char rc_buf[]; +}; + +struct dlm_opt_header { + __le16 t_type; + __le16 t_length; + __le32 t_pad; + /* need to be 8 byte aligned */ + char t_value[]; +}; + +/* encapsulation header */ +struct dlm_opts { + struct dlm_header o_header; + uint8_t o_nextcmd; + uint8_t o_pad; + __le16 o_optlen; + __le32 o_pad2; + char o_opts[]; }; union dlm_packet { struct dlm_header header; /* common to other two */ struct dlm_message message; struct dlm_rcom rcom; + struct dlm_opts opts; }; #define DLM_RSF_NEED_SLOTS 0x00000001 @@ -499,55 +556,42 @@ struct rcom_lock { __le16 rl_wait_type; __le16 rl_namelen; char rl_name[DLM_RESNAME_MAXLEN]; - char rl_lvb[0]; + 
char rl_lvb[]; }; -/* - * The max number of resources per rsbtbl bucket that shrink will attempt - * to remove in each iteration. - */ - -#define DLM_REMOVE_NAMES_MAX 8 - struct dlm_ls { struct list_head ls_list; /* list of lockspaces */ - dlm_lockspace_t *ls_local_handle; uint32_t ls_global_id; /* global unique lockspace ID */ uint32_t ls_generation; uint32_t ls_exflags; int ls_lvblen; - int ls_count; /* refcount of processes in + atomic_t ls_count; /* refcount of processes in the dlm using this ls */ + wait_queue_head_t ls_count_wait; int ls_create_count; /* create/release refcount */ unsigned long ls_flags; /* LSFL_ */ - unsigned long ls_scan_time; struct kobject ls_kobj; - struct idr ls_lkbidr; - spinlock_t ls_lkbidr_spin; + struct xarray ls_lkbxa; + rwlock_t ls_lkbxa_lock; + + /* an rsb is on rsbtl for primary locking functions, + and on a slow list for recovery/dump iteration */ + struct rhashtable ls_rsbtbl; + rwlock_t ls_rsbtbl_lock; /* for ls_rsbtbl and ls_slow */ + struct list_head ls_slow_inactive; /* to iterate rsbtbl */ + struct list_head ls_slow_active; /* to iterate rsbtbl */ - struct dlm_rsbtable *ls_rsbtbl; - uint32_t ls_rsbtbl_size; + struct timer_list ls_scan_timer; /* based on first scan_list rsb toss_time */ + struct list_head ls_scan_list; /* rsbs ordered by res_toss_time */ + spinlock_t ls_scan_lock; - struct mutex ls_waiters_mutex; + spinlock_t ls_waiters_lock; struct list_head ls_waiters; /* lkbs needing a reply */ - struct mutex ls_orphans_mutex; + spinlock_t ls_orphans_lock; struct list_head ls_orphans; - struct mutex ls_timeout_mutex; - struct list_head ls_timeout; - - spinlock_t ls_new_rsb_spin; - int ls_new_rsb_count; - struct list_head ls_new_rsb; /* new rsb structs */ - - spinlock_t ls_remove_spin; - char ls_remove_name[DLM_RESNAME_MAXLEN+1]; - char *ls_remove_names[DLM_REMOVE_NAMES_MAX]; - int ls_remove_len; - int ls_remove_lens[DLM_REMOVE_NAMES_MAX]; - struct list_head ls_nodes; /* current nodes in ls */ struct list_head ls_nodes_gone; /* dead node list, recovery */ int ls_num_nodes; /* number of nodes in ls */ @@ -560,20 +604,21 @@ struct dlm_ls { int ls_slots_size; struct dlm_slot *ls_slots; - struct dlm_rsb ls_stub_rsb; /* for returning errors */ - struct dlm_lkb ls_stub_lkb; /* for returning errors */ - struct dlm_message ls_stub_ms; /* for faking a reply */ + struct dlm_rsb ls_local_rsb; /* for returning errors */ + struct dlm_lkb ls_local_lkb; /* for returning errors */ + struct dlm_message ls_local_ms; /* for faking a reply */ struct dentry *ls_debug_rsb_dentry; /* debugfs */ struct dentry *ls_debug_waiters_dentry; /* debugfs */ struct dentry *ls_debug_locks_dentry; /* debugfs */ struct dentry *ls_debug_all_dentry; /* debugfs */ struct dentry *ls_debug_toss_dentry; /* debugfs */ + struct dentry *ls_debug_queued_asts_dentry; /* debugfs */ wait_queue_head_t ls_uevent_wait; /* user part of join/leave */ int ls_uevent_result; - struct completion ls_members_done; - int ls_members_result; + struct completion ls_recovery_done; + int ls_recovery_result; struct miscdevice ls_device; @@ -581,9 +626,8 @@ struct dlm_ls { /* recovery related */ - struct mutex ls_cb_mutex; + spinlock_t ls_cb_lock; struct list_head ls_cb_delay; /* save for queue_work later */ - struct timer_list ls_timer; struct task_struct *ls_recoverd_task; struct mutex ls_recoverd_active; spinlock_t ls_recover_lock; @@ -592,33 +636,35 @@ struct dlm_ls { uint64_t ls_recover_seq; struct dlm_recover *ls_recover_args; struct rw_semaphore ls_in_recovery; /* block local requests */ - struct 
rw_semaphore ls_recv_active; /* block dlm_recv */ + rwlock_t ls_recv_active; /* block dlm_recv */ struct list_head ls_requestqueue;/* queue remote requests */ - struct mutex ls_requestqueue_mutex; + rwlock_t ls_requestqueue_lock; struct dlm_rcom *ls_recover_buf; int ls_recover_nodeid; /* for debugging */ - unsigned int ls_recover_dir_sent_res; /* for log info */ - unsigned int ls_recover_dir_sent_msg; /* for log info */ unsigned int ls_recover_locks_in; /* for log info */ uint64_t ls_rcom_seq; spinlock_t ls_rcom_spin; struct list_head ls_recover_list; spinlock_t ls_recover_list_lock; int ls_recover_list_count; - struct idr ls_recover_idr; - spinlock_t ls_recover_idr_lock; + struct xarray ls_recover_xa; + spinlock_t ls_recover_xa_lock; wait_queue_head_t ls_wait_general; wait_queue_head_t ls_recover_lock_wait; - struct mutex ls_clear_proc_locks; + spinlock_t ls_clear_proc_locks; - struct list_head ls_root_list; /* root resources */ - struct rw_semaphore ls_root_sem; /* protect root_list */ + struct list_head ls_masters_list; /* root resources */ + rwlock_t ls_masters_lock; /* protect root_list */ + struct list_head ls_dir_dump_list; /* root resources */ + rwlock_t ls_dir_dump_lock; /* protect root_list */ const struct dlm_lockspace_ops *ls_ops; void *ls_ops_arg; + struct work_struct ls_free_work; + int ls_namelen; - char ls_name[1]; + char ls_name[DLM_LOCKSPACE_LEN + 1]; }; /* @@ -652,26 +698,11 @@ struct dlm_ls { #define LSFL_RCOM_READY 5 #define LSFL_RCOM_WAIT 6 #define LSFL_UEVENT_WAIT 7 -#define LSFL_TIMEWARN 8 #define LSFL_CB_DELAY 9 #define LSFL_NODIR 10 - -/* much of this is just saving user space pointers associated with the - lock that we pass back to the user lib with an ast */ - -struct dlm_user_args { - struct dlm_user_proc *proc; /* each process that opens the lockspace - device has private data - (dlm_user_proc) on the struct file, - the process's locks point back to it*/ - struct dlm_lksb lksb; - struct dlm_lksb __user *user_lksb; - void __user *castparam; - void __user *castaddr; - void __user *bastparam; - void __user *bastaddr; - uint64_t xid; -}; +#define LSFL_RECV_MSG_BLOCKED 11 +#define LSFL_FS 12 +#define LSFL_SOFTIRQ 13 #define DLM_PROC_FLAGS_CLOSING 1 #define DLM_PROC_FLAGS_COMPAT 2 @@ -705,22 +736,95 @@ static inline int dlm_no_directory(struct dlm_ls *ls) return test_bit(LSFL_NODIR, &ls->ls_flags); } -int dlm_netlink_init(void); -void dlm_netlink_exit(void); -void dlm_timeout_warn(struct dlm_lkb *lkb); +/* takes a snapshot from dlm atomic flags */ +static inline uint32_t dlm_flags_val(const unsigned long *addr, + uint32_t min, uint32_t max) +{ + uint32_t bit = min, val = 0; + + for_each_set_bit_from(bit, addr, max + 1) { + val |= BIT(bit); + } + + return val; +} + +static inline uint32_t dlm_iflags_val(const struct dlm_lkb *lkb) +{ + return dlm_flags_val(&lkb->lkb_iflags, __DLM_IFL_MIN_BIT, + __DLM_IFL_MAX_BIT); +} + +static inline uint32_t dlm_dflags_val(const struct dlm_lkb *lkb) +{ + return dlm_flags_val(&lkb->lkb_dflags, __DLM_DFL_MIN_BIT, + __DLM_DFL_MAX_BIT); +} + +/* coming from UAPI header + * + * TODO: + * Move this to UAPI header and let other values point to them and use BIT() + */ +#define DLM_SBF_DEMOTED_BIT 0 +#define __DLM_SBF_MIN_BIT DLM_SBF_DEMOTED_BIT +#define DLM_SBF_VALNOTVALID_BIT 1 +#define DLM_SBF_ALTMODE_BIT 2 +#define __DLM_SBF_MAX_BIT DLM_SBF_ALTMODE_BIT + +static inline uint32_t dlm_sbflags_val(const struct dlm_lkb *lkb) +{ + /* be sure the next person updates this */ + BUILD_BUG_ON(BIT(__DLM_SBF_MAX_BIT) != DLM_SBF_ALTMODE); + + return 
dlm_flags_val(&lkb->lkb_sbflags, __DLM_SBF_MIN_BIT, + __DLM_SBF_MAX_BIT); +} + +static inline void dlm_set_flags_val(unsigned long *addr, uint32_t val, + uint32_t min, uint32_t max) +{ + uint32_t bit; + + for (bit = min; bit < (max + 1); bit++) { + if (val & BIT(bit)) + set_bit(bit, addr); + else + clear_bit(bit, addr); + } +} + +static inline void dlm_set_dflags_val(struct dlm_lkb *lkb, uint32_t val) +{ + dlm_set_flags_val(&lkb->lkb_dflags, val, __DLM_DFL_MIN_BIT, + __DLM_DFL_MAX_BIT); +} + +static inline void dlm_set_sbflags_val(struct dlm_lkb *lkb, uint32_t val) +{ + dlm_set_flags_val(&lkb->lkb_sbflags, val, __DLM_SBF_MIN_BIT, + __DLM_SBF_MAX_BIT); +} + +extern struct workqueue_struct *dlm_wq; + int dlm_plock_init(void); void dlm_plock_exit(void); #ifdef CONFIG_DLM_DEBUG -int dlm_register_debugfs(void); +void dlm_register_debugfs(void); void dlm_unregister_debugfs(void); -int dlm_create_debug_file(struct dlm_ls *ls); +void dlm_create_debug_file(struct dlm_ls *ls); void dlm_delete_debug_file(struct dlm_ls *ls); +void *dlm_create_debug_comms_file(int nodeid, void *data); +void dlm_delete_debug_comms_file(void *ctx); #else -static inline int dlm_register_debugfs(void) { return 0; } +static inline void dlm_register_debugfs(void) { } static inline void dlm_unregister_debugfs(void) { } -static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; } +static inline void dlm_create_debug_file(struct dlm_ls *ls) { } static inline void dlm_delete_debug_file(struct dlm_ls *ls) { } +static inline void *dlm_create_debug_comms_file(int nodeid, void *data) { return NULL; } +static inline void dlm_delete_debug_comms_file(void *ctx) { } #endif #endif /* __DLM_INTERNAL_DOT_H__ */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index e223a911a834..be938fdf17d9 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
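The dlm_flags_val()/dlm_set_flags_val() helpers above convert between a range of atomically managed flag bits and a plain u32 snapshot, which is what gets copied into on-wire messages. A small worked illustration using the helpers and bit names defined above (illustration only; DLM does not call them in this standalone way):

#include <linux/bitops.h>
#include <linux/types.h>

static void demo_iflags_roundtrip(void)
{
	unsigned long iflags = 0;
	uint32_t val;

	set_bit(DLM_IFL_RESEND_BIT, &iflags);	/* bit 17 */
	set_bit(DLM_IFL_DEAD_BIT, &iflags);	/* bit 18 */

	/* snapshot bits 16..24 into a u32: here 0x00060000 */
	val = dlm_flags_val(&iflags, __DLM_IFL_MIN_BIT, __DLM_IFL_MAX_BIT);

	/* apply the snapshot onto a fresh bitmap, setting and
	 * clearing every bit in the covered range */
	iflags = 0;
	dlm_set_flags_val(&iflags, val, __DLM_IFL_MIN_BIT, __DLM_IFL_MAX_BIT);
}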
** ******************************************************************************* ******************************************************************************/ @@ -55,13 +53,15 @@ R: do_xxxx() L: receive_xxxx_reply() <- R: send_xxxx_reply() */ +#include <trace/events/dlm.h> + #include <linux/types.h> #include <linux/rbtree.h> #include <linux/slab.h> #include "dlm_internal.h" #include <linux/dlm_device.h> #include "memory.h" -#include "lowcomms.h" +#include "midcomms.h" #include "requestqueue.h" #include "util.h" #include "dir.h" @@ -86,11 +86,10 @@ static int send_remove(struct dlm_rsb *r); static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb); static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb); static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, - struct dlm_message *ms); -static int receive_extralen(struct dlm_message *ms); + const struct dlm_message *ms, bool local); +static int receive_extralen(const struct dlm_message *ms); static void do_purge(struct dlm_ls *ls, int nodeid, int pid); -static void del_timeout(struct dlm_lkb *lkb); -static void toss_rsb(struct kref *kref); +static void deactivate_rsb(struct kref *kref); /* * Lock compatibilty matrix - thanks Steve @@ -164,7 +163,7 @@ void dlm_print_lkb(struct dlm_lkb *lkb) printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x " "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n", lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags, - lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode, + dlm_iflags_val(lkb), lkb->lkb_status, lkb->lkb_rqmode, lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid, (unsigned long long)lkb->lkb_recover_seq); } @@ -202,7 +201,7 @@ void dlm_dump_rsb(struct dlm_rsb *r) /* Threads cannot use the lockspace while it's being recovered */ -static inline void dlm_lock_recovery(struct dlm_ls *ls) +void dlm_lock_recovery(struct dlm_ls *ls) { down_read(&ls->ls_in_recovery); } @@ -229,12 +228,12 @@ static inline int force_blocking_asts(struct dlm_lkb *lkb) static inline int is_demoted(struct dlm_lkb *lkb) { - return (lkb->lkb_sbflags & DLM_SBF_DEMOTED); + return test_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); } static inline int is_altmode(struct dlm_lkb *lkb) { - return (lkb->lkb_sbflags & DLM_SBF_ALTMODE); + return test_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); } static inline int is_granted(struct dlm_lkb *lkb) @@ -250,12 +249,13 @@ static inline int is_remote(struct dlm_rsb *r) static inline int is_process_copy(struct dlm_lkb *lkb) { - return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY)); + return lkb->lkb_nodeid && + !test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); } static inline int is_master_copy(struct dlm_lkb *lkb) { - return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 
1 : 0; + return test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); } static inline int middle_conversion(struct dlm_lkb *lkb) @@ -273,18 +273,18 @@ static inline int down_conversion(struct dlm_lkb *lkb) static inline int is_overlap_unlock(struct dlm_lkb *lkb) { - return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK; + return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); } static inline int is_overlap_cancel(struct dlm_lkb *lkb) { - return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL; + return test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); } static inline int is_overlap(struct dlm_lkb *lkb) { - return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK | - DLM_IFL_OVERLAP_CANCEL)); + return test_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags) || + test_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); } static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) @@ -292,23 +292,13 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) if (is_master_copy(lkb)) return; - del_timeout(lkb); - DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb);); - /* if the operation was a cancel, then return -DLM_ECANCEL, if a - timeout caused the cancel then return -ETIMEDOUT */ - if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) { - lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL; - rv = -ETIMEDOUT; - } - - if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) { - lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL; + if (rv == -DLM_ECANCEL && + test_and_clear_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags)) rv = -EDEADLK; - } - dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags); + dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, dlm_sbflags_val(lkb)); } static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -330,11 +320,18 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode) * Basic operations on rsb's and lkb's */ +static inline unsigned long rsb_toss_jiffies(void) +{ + return jiffies + (READ_ONCE(dlm_config.ci_toss_secs) * HZ); +} + /* This is only called to add a reference when the code already holds a valid reference to the rsb, so there's no need for locking. */ static inline void hold_rsb(struct dlm_rsb *r) { + /* inactive rsbs are not ref counted */ + WARN_ON(rsb_flag(r, RSB_INACTIVE)); kref_get(&r->res_ref); } @@ -343,17 +340,45 @@ void dlm_hold_rsb(struct dlm_rsb *r) hold_rsb(r); } -/* When all references to the rsb are gone it's transferred to - the tossed list for later disposal. 
*/ +/* TODO move this to lib/refcount.c */ +static __must_check bool +dlm_refcount_dec_and_write_lock_bh(refcount_t *r, rwlock_t *lock) +__cond_acquires(lock) +{ + if (refcount_dec_not_one(r)) + return false; + + write_lock_bh(lock); + if (!refcount_dec_and_test(r)) { + write_unlock_bh(lock); + return false; + } + + return true; +} + +/* TODO move this to include/linux/kref.h */ +static inline int dlm_kref_put_write_lock_bh(struct kref *kref, + void (*release)(struct kref *kref), + rwlock_t *lock) +{ + if (dlm_refcount_dec_and_write_lock_bh(&kref->refcount, lock)) { + release(kref); + return 1; + } + + return 0; +} static void put_rsb(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - uint32_t bucket = r->res_bucket; + int rv; - spin_lock(&ls->ls_rsbtbl[bucket].lock); - kref_put(&r->res_ref, toss_rsb); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + rv = dlm_kref_put_write_lock_bh(&r->res_ref, deactivate_rsb, + &ls->ls_rsbtbl_lock); + if (rv) + write_unlock_bh(&ls->ls_rsbtbl_lock); } void dlm_put_rsb(struct dlm_rsb *r) @@ -361,141 +386,265 @@ void dlm_put_rsb(struct dlm_rsb *r) { put_rsb(r); } -static int pre_rsb_struct(struct dlm_ls *ls) +/* connected with timer_delete_sync() in dlm_ls_stop(): stop arming + * new timers when recovery is triggered, and don't arm them + * again until a resume_scan_timer() call does so. + */ +static void enable_scan_timer(struct dlm_ls *ls, unsigned long jiffies) { - struct dlm_rsb *r1, *r2; - int count = 0; + if (!dlm_locking_stopped(ls)) + mod_timer(&ls->ls_scan_timer, jiffies); +} - spin_lock(&ls->ls_new_rsb_spin); - if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) { - spin_unlock(&ls->ls_new_rsb_spin); - return 0; - } - spin_unlock(&ls->ls_new_rsb_spin); +/* This function tries to resume the timer callback if an rsb + * is on the scan list and no timer is pending. It might be that + * the first entry is currently being executed as the timer callback, + * but we don't care if the timer is queued up again and then does + * nothing. That should be a rare case. + */ +void resume_scan_timer(struct dlm_ls *ls) +{ + struct dlm_rsb *r; + + spin_lock_bh(&ls->ls_scan_lock); + r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + if (r && !timer_pending(&ls->ls_scan_timer)) + enable_scan_timer(ls, r->res_toss_time); + spin_unlock_bh(&ls->ls_scan_lock); +} + +/* ls_rsbtbl_lock must be held */ + +static void del_scan(struct dlm_ls *ls, struct dlm_rsb *r) +{ + struct dlm_rsb *first; + + /* active rsbs should never be on the scan list */ + WARN_ON(!rsb_flag(r, RSB_INACTIVE)); - r1 = dlm_allocate_rsb(ls); - r2 = dlm_allocate_rsb(ls); + spin_lock_bh(&ls->ls_scan_lock); + r->res_toss_time = 0; - spin_lock(&ls->ls_new_rsb_spin); - if (r1) { - list_add(&r1->res_hashchain, &ls->ls_new_rsb); - ls->ls_new_rsb_count++; + /* if the rsb is not queued do nothing */ + if (list_empty(&r->res_scan_list)) + goto out; + + /* get the first element before delete */ + first = list_first_entry(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + list_del_init(&r->res_scan_list); + /* check if the first element was the rsb we deleted */ + if (first == r) { + /* get the new first element; if the list is now + * empty, try to delete the timer (if we are too + * late we don't care). + * + * if the list isn't empty and a new first element is + * in place, set the new timer expire time. + */ + first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + if (!first) + timer_delete(&ls->ls_scan_timer); + else + enable_scan_timer(ls, first->res_toss_time); } - if (r2) { - list_add(&r2->res_hashchain, &ls->ls_new_rsb); - ls->ls_new_rsb_count++; + +out: + spin_unlock_bh(&ls->ls_scan_lock); +}
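The scan list managed here stays sorted without any explicit sorting: rsb_toss_jiffies() assigns monotonically non-decreasing expiry times, so add_scan() below can append to the tail and the head entry is always the next to expire, which is the only entry the timer ever needs to be armed for. A reduced model of that single-timer scheme (hypothetical demo_* names, locking omitted; not DLM code):

#include <linux/timer.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct demo_item {
	struct list_head list;
	unsigned long expires;	/* absolute, in jiffies */
};

static LIST_HEAD(demo_queue);
static struct timer_list demo_timer;	/* set up elsewhere with timer_setup() */

static void demo_add(struct demo_item *it, unsigned long timeout)
{
	bool was_empty = list_empty(&demo_queue);

	it->expires = jiffies + timeout;
	/* appending keeps the queue ordered because expiry times
	 * are assigned monotonically */
	list_add_tail(&it->list, &demo_queue);

	/* only the head's expiry ever needs a timer; if the queue
	 * was non-empty, the currently armed timer is already correct */
	if (was_empty)
		mod_timer(&demo_timer, it->expires);
}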
+ */ + first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + if (!first) + timer_delete(&ls->ls_scan_timer); + else + enable_scan_timer(ls, first->res_toss_time); } - if (r2) { - list_add(&r2->res_hashchain, &ls->ls_new_rsb); - ls->ls_new_rsb_count++; + +out: + spin_unlock_bh(&ls->ls_scan_lock); +} + +static void add_scan(struct dlm_ls *ls, struct dlm_rsb *r) +{ + int our_nodeid = dlm_our_nodeid(); + struct dlm_rsb *first; + + /* A dir record for a remote master rsb should never be on the scan list. */ + WARN_ON(!dlm_no_directory(ls) && + (r->res_master_nodeid != our_nodeid) && + (dlm_dir_nodeid(r) == our_nodeid)); + + /* An active rsb should never be on the scan list. */ + WARN_ON(!rsb_flag(r, RSB_INACTIVE)); + + /* An rsb should not already be on the scan list. */ + WARN_ON(!list_empty(&r->res_scan_list)); + + spin_lock_bh(&ls->ls_scan_lock); + /* set the new rsb absolute expire time in the rsb */ + r->res_toss_time = rsb_toss_jiffies(); + if (list_empty(&ls->ls_scan_list)) { + /* if the queue is empty add the element and it's + * our new expire time + */ + list_add_tail(&r->res_scan_list, &ls->ls_scan_list); + enable_scan_timer(ls, r->res_toss_time); + } else { + /* try to get the maybe new first element and then add + * to this rsb with the oldest expire time to the end + * of the queue. If the list was empty before this + * rsb expire time is our next expiration if it wasn't + * the now new first elemet is our new expiration time + */ + first = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + list_add_tail(&r->res_scan_list, &ls->ls_scan_list); + if (!first) + enable_scan_timer(ls, r->res_toss_time); + else + enable_scan_timer(ls, first->res_toss_time); } - count = ls->ls_new_rsb_count; - spin_unlock(&ls->ls_new_rsb_spin); + spin_unlock_bh(&ls->ls_scan_lock); +} - if (!count) - return -ENOMEM; - return 0; +/* if we hit contention we do in 250 ms a retry to trylock. + * if there is any other mod_timer in between we don't care + * about that it expires earlier again this is only for the + * unlikely case nothing happened in this time. + */ +#define DLM_TOSS_TIMER_RETRY (jiffies + msecs_to_jiffies(250)) + +/* Called by lockspace scan_timer to free unused rsb's. */ + +void dlm_rsb_scan(struct timer_list *timer) +{ + struct dlm_ls *ls = timer_container_of(ls, timer, ls_scan_timer); + int our_nodeid = dlm_our_nodeid(); + struct dlm_rsb *r; + int rv; + + while (1) { + /* interrupting point to leave iteration when + * recovery waits for timer_delete_sync(), recovery + * will take care to delete everything in scan list. + */ + if (dlm_locking_stopped(ls)) + break; + + rv = spin_trylock(&ls->ls_scan_lock); + if (!rv) { + /* rearm again try timer */ + enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY); + break; + } + + r = list_first_entry_or_null(&ls->ls_scan_list, struct dlm_rsb, + res_scan_list); + if (!r) { + /* the next add_scan will enable the timer again */ + spin_unlock(&ls->ls_scan_lock); + break; + } + + /* + * If the first rsb is not yet expired, then stop because the + * list is sorted with nearest expiration first. + */ + if (time_before(jiffies, r->res_toss_time)) { + /* rearm with the next rsb to expire in the future */ + enable_scan_timer(ls, r->res_toss_time); + spin_unlock(&ls->ls_scan_lock); + break; + } + + /* in find_rsb_dir/nodir there is a reverse order of this + * lock, however this is only a trylock if we hit some + * possible contention we try it again. 
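
[Editor's note] put_rsb() in the hunk above retires the last rsb reference through dlm_kref_put_write_lock_bh(), a bottom-half-safe sibling of the existing kref_put_lock(): refcount_dec_not_one() absorbs every "not the last reference" drop without touching ls_rsbtbl_lock, and only the final 1->0 transition takes the write lock, re-checks, and runs the release callback with the lock still held, so a concurrent lookup can never kref_get() an rsb that is past the point of no return. A condensed caller-side sketch, assuming the helper exactly as defined above (names illustrative):

static void ex_release(struct kref *ref)
{
	/* runs with the write lock held: the object can be unhashed
	 * here before any lookup can find it and take a reference */
}

static void ex_put(struct kref *ref, rwlock_t *lock)
{
	/* common case: a pure atomic decrement, no lock traffic */
	if (dlm_kref_put_write_lock_bh(ref, ex_release, lock))
		write_unlock_bh(lock);	/* we own the lock iff release ran */
}
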
+ */ + rv = write_trylock(&ls->ls_rsbtbl_lock); + if (!rv) { + spin_unlock(&ls->ls_scan_lock); + /* rearm again try timer */ + enable_scan_timer(ls, DLM_TOSS_TIMER_RETRY); + break; + } + + list_del(&r->res_slow_list); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); + rsb_clear_flag(r, RSB_HASHED); + + /* ls_rsbtbl_lock is not needed when calling send_remove() */ + write_unlock(&ls->ls_rsbtbl_lock); + + list_del_init(&r->res_scan_list); + spin_unlock(&ls->ls_scan_lock); + + /* An rsb that is a dir record for a remote master rsb + * cannot be removed, and should not have a timer enabled. + */ + WARN_ON(!dlm_no_directory(ls) && + (r->res_master_nodeid != our_nodeid) && + (dlm_dir_nodeid(r) == our_nodeid)); + + /* We're the master of this rsb but we're not + * the directory record, so we need to tell the + * dir node to remove the dir record + */ + if (!dlm_no_directory(ls) && + (r->res_master_nodeid == our_nodeid) && + (dlm_dir_nodeid(r) != our_nodeid)) + send_remove(r); + + free_inactive_rsb(r); + } } /* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can unlock any spinlocks, go back and call pre_rsb_struct again. Otherwise, take an rsb off the list and return it. */ -static int get_rsb_struct(struct dlm_ls *ls, char *name, int len, +static int get_rsb_struct(struct dlm_ls *ls, const void *name, int len, struct dlm_rsb **r_ret) { struct dlm_rsb *r; - int count; - - spin_lock(&ls->ls_new_rsb_spin); - if (list_empty(&ls->ls_new_rsb)) { - count = ls->ls_new_rsb_count; - spin_unlock(&ls->ls_new_rsb_spin); - log_debug(ls, "find_rsb retry %d %d %s", - count, dlm_config.ci_new_rsb_count, name); - return -EAGAIN; - } - r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain); - list_del(&r->res_hashchain); - /* Convert the empty list_head to a NULL rb_node for tree usage: */ - memset(&r->res_hashnode, 0, sizeof(struct rb_node)); - ls->ls_new_rsb_count--; - spin_unlock(&ls->ls_new_rsb_spin); + r = dlm_allocate_rsb(); + if (!r) + return -ENOMEM; r->res_ls = ls; r->res_length = len; memcpy(r->res_name, name, len); - mutex_init(&r->res_mutex); + spin_lock_init(&r->res_lock); INIT_LIST_HEAD(&r->res_lookup); INIT_LIST_HEAD(&r->res_grantqueue); INIT_LIST_HEAD(&r->res_convertqueue); INIT_LIST_HEAD(&r->res_waitqueue); INIT_LIST_HEAD(&r->res_root_list); + INIT_LIST_HEAD(&r->res_scan_list); INIT_LIST_HEAD(&r->res_recover_list); + INIT_LIST_HEAD(&r->res_masters_list); *r_ret = r; return 0; } -static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen) +int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len, + struct dlm_rsb **r_ret) { - char maxname[DLM_RESNAME_MAXLEN]; + char key[DLM_RESNAME_MAXLEN] = {}; - memset(maxname, 0, DLM_RESNAME_MAXLEN); - memcpy(maxname, name, nlen); - return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN); -} + memcpy(key, name, len); + *r_ret = rhashtable_lookup_fast(rhash, &key, dlm_rhash_rsb_params); + if (*r_ret) + return 0; -int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, - struct dlm_rsb **r_ret) -{ - struct rb_node *node = tree->rb_node; - struct dlm_rsb *r; - int rc; - - while (node) { - r = rb_entry(node, struct dlm_rsb, res_hashnode); - rc = rsb_cmp(r, name, len); - if (rc < 0) - node = node->rb_left; - else if (rc > 0) - node = node->rb_right; - else - goto found; - } - *r_ret = NULL; return -EBADR; - - found: - *r_ret = r; - return 0; } -static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) +static int rsb_insert(struct dlm_rsb *rsb, struct rhashtable *rhash) 
{ - struct rb_node **newn = &tree->rb_node; - struct rb_node *parent = NULL; - int rc; - - while (*newn) { - struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb, - res_hashnode); + int rv; - parent = *newn; - rc = rsb_cmp(cur, rsb->res_name, rsb->res_length); - if (rc < 0) - newn = &parent->rb_left; - else if (rc > 0) - newn = &parent->rb_right; - else { - log_print("rsb_insert match"); - dlm_dump_rsb(rsb); - dlm_dump_rsb(cur); - return -EEXIST; - } - } + rv = rhashtable_insert_fast(rhash, &rsb->res_node, + dlm_rhash_rsb_params); + if (!rv) + rsb_set_flag(rsb, RSB_HASHED); - rb_link_node(&rsb->res_hashnode, parent, newn); - rb_insert_color(&rsb->res_hashnode, tree); - return 0; + return rv; } /* @@ -525,7 +674,7 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) * So, if the given rsb is on the toss list, it is moved to the keep list * before being returned. * - * toss_rsb() happens when all local usage of the rsb is done, i.e. no + * deactivate_rsb() happens when all local usage of the rsb is done, i.e. no * more refcounts exist, so the rsb is moved from the keep list to the * toss list. * @@ -542,9 +691,8 @@ static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree) * while that rsb has a potentially stale master.) */ -static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, - uint32_t hash, uint32_t b, - int dir_nodeid, int from_nodeid, +static int find_rsb_dir(struct dlm_ls *ls, const void *name, int len, + uint32_t hash, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; @@ -574,9 +722,9 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, * * If someone sends us a request, we are the dir node, and we do * not find the rsb anywhere, then recreate it. This happens if - * someone sends us a request after we have removed/freed an rsb - * from our toss list. (They sent a request instead of lookup - * because they are using an rsb from their toss list.) + * someone sends us a request after we have removed/freed an rsb. + * (They sent a request instead of lookup because they are using + * an rsb taken from their scan list.) */ if (from_local || from_dir || @@ -585,52 +733,83 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, } retry: - if (create) { - error = pre_rsb_struct(ls); - if (error < 0) - goto out; - } - - spin_lock(&ls->ls_rsbtbl[b].lock); - - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (error) - goto do_toss; + goto do_new; + + /* check if the rsb is active under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); + if (!rsb_flag(r, RSB_HASHED)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + error = -EBADR; + goto do_new; + } /* * rsb is active, so we can't check master_nodeid without lock_rsb. */ + if (rsb_flag(r, RSB_INACTIVE)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_inactive; + } + kref_get(&r->res_ref); - error = 0; - goto out_unlock; + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto out; - do_toss: - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) + do_inactive: + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* + * The expectation here is that the rsb will have HASHED and + * INACTIVE flags set, and that the rsb can be moved from + * inactive back to active again. However, between releasing + * the read lock and acquiring the write lock, this rsb could + * have been removed from rsbtbl, and had HASHED cleared, to + * be freed. 
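
[Editor's note] The scan machinery above (add_scan(), dlm_rsb_scan()) gets away with a single timer for any number of inactive rsbs because of one invariant: res_toss_time is always assigned as "now + ci_toss_secs", so expiry values are non-decreasing and a plain tail append keeps ls_scan_list sorted; the timer only ever needs to be armed for the head entry. A minimal model of that invariant, with illustrative types:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/timer.h>

struct ex_entry {
	struct list_head list;
	unsigned long expire;		/* absolute jiffies */
};

static void ex_queue(struct list_head *head, struct timer_list *timer,
		     struct ex_entry *e, unsigned long timeout_jiffies)
{
	bool was_empty = list_empty(head);

	/* monotonic expiry => tail append preserves sort order */
	e->expire = jiffies + timeout_jiffies;
	list_add_tail(&e->list, head);

	/* the head's expiry is the only firing time that matters */
	if (was_empty)
		mod_timer(timer, e->expire);
}
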
To deal with this case, we would normally need + * to repeat dlm_search_rsb_tree while holding the write lock, + * but rcu allows us to simply check the HASHED flag, because + * the rcu read lock means the rsb will not be freed yet. + * If the HASHED flag is not set, then the rsb is being freed, + * so we add a new rsb struct. If the HASHED flag is set, + * and INACTIVE is not set, it means another thread has + * made the rsb active, as we're expecting to do here, and + * we just repeat the lookup (this will be very unlikely.) + */ + if (rsb_flag(r, RSB_HASHED)) { + if (!rsb_flag(r, RSB_INACTIVE)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto retry; + } + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); + error = -EBADR; goto do_new; + } /* * rsb found inactive (master_nodeid may be out of date unless * we are the dir_nodeid or were the master) No other thread - * is using this rsb because it's on the toss list, so we can + * is using this rsb because it's inactive, so we can * look at or update res_master_nodeid without lock_rsb. */ if ((r->res_master_nodeid != our_nodeid) && from_other) { /* our rsb was not master, and another node (not the dir node) has sent us a request */ - log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s", + log_debug(ls, "find_rsb inactive from_other %d master %d dir %d %s", from_nodeid, r->res_master_nodeid, dir_nodeid, r->res_name); + write_unlock_bh(&ls->ls_rsbtbl_lock); error = -ENOTBLK; - goto out_unlock; + goto out; } if ((r->res_master_nodeid != our_nodeid) && from_dir) { /* don't think this should ever happen */ - log_error(ls, "find_rsb toss from_dir %d master %d", + log_error(ls, "find_rsb inactive from_dir %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); /* fix it and go on */ @@ -647,9 +826,18 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, r->res_first_lkid = 0; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); - goto out_unlock; + /* we always deactivate scan timer for the rsb, when + * we move it out of the inactive state as rsb state + * can be changed and scan timers are only for inactive + * rsbs. 
+ */ + del_scan(ls, r); + list_move(&r->res_slow_list, &ls->ls_slow_active); + rsb_clear_flag(r, RSB_INACTIVE); + kref_init(&r->res_ref); /* ref is now used in active state */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + + goto out; do_new: @@ -658,18 +846,13 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, */ if (error == -EBADR && !create) - goto out_unlock; + goto out; error = get_rsb_struct(ls, name, len, &r); - if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - goto retry; - } - if (error) - goto out_unlock; + if (WARN_ON_ONCE(error)) + goto out; r->res_hash = hash; - r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; kref_init(&r->res_ref); @@ -687,8 +870,9 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, log_error(ls, "find_rsb new from_other %d dir %d our %d %s", from_nodeid, dir_nodeid, our_nodeid, r->res_name); dlm_free_rsb(r); + r = NULL; error = -ENOTBLK; - goto out_unlock; + goto out; } if (from_other) { @@ -708,9 +892,20 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, } out_add: - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); - out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + + write_lock_bh(&ls->ls_rsbtbl_lock); + error = rsb_insert(r, &ls->ls_rsbtbl); + if (error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry; + } else if (!error) { + list_add(&r->res_slow_list, &ls->ls_slow_active); + } + write_unlock_bh(&ls->ls_rsbtbl_lock); out: *r_ret = r; return error; @@ -720,9 +915,8 @@ static int find_rsb_dir(struct dlm_ls *ls, char *name, int len, dlm_recover_locks) before we've made ourself master (in dlm_recover_masters). */ -static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len, - uint32_t hash, uint32_t b, - int dir_nodeid, int from_nodeid, +static int find_rsb_nodir(struct dlm_ls *ls, const void *name, int len, + uint32_t hash, int dir_nodeid, int from_nodeid, unsigned int flags, struct dlm_rsb **r_ret) { struct dlm_rsb *r = NULL; @@ -731,59 +925,82 @@ static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len, int error; retry: - error = pre_rsb_struct(ls); - if (error < 0) - goto out; + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + if (error) + goto do_new; - spin_lock(&ls->ls_rsbtbl[b].lock); + /* check if the rsb is in active state under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); + if (!rsb_flag(r, RSB_HASHED)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_new; + } - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (error) - goto do_toss; + if (rsb_flag(r, RSB_INACTIVE)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_inactive; + } /* * rsb is active, so we can't check master_nodeid without lock_rsb. */ kref_get(&r->res_ref); - goto out_unlock; + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto out; - do_toss: - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) + + do_inactive: + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* See comment in find_rsb_dir. */ + if (rsb_flag(r, RSB_HASHED)) { + if (!rsb_flag(r, RSB_INACTIVE)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto retry; + } + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); goto do_new; + } + /* * rsb found inactive. No other thread is using this rsb because - * it's on the toss list, so we can look at or update - * res_master_nodeid without lock_rsb. 
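
[Editor's note] The long comment above is the heart of the new locking scheme, and the same read-then-write shape repeats in find_rsb_nodir() and dlm_master_lookup(): resolve by name (with the key zero-padded to DLM_RESNAME_MAXLEN for the rhashtable), peek at the flags under the read lock, and escalate to the write lock only for the unlikely inactive case, re-checking HASHED and INACTIVE after the lock switch because both can change in the gap. Reduced to its control flow; lookup(), flag() and reactivate() are stand-ins, not real helpers:

retry:
	obj = lookup(name);			/* rcu keeps obj allocated */
	if (!obj)
		goto create_new;

	read_lock_bh(&tbl_lock);
	if (!flag(obj, HASHED)) {		/* concurrently removed */
		read_unlock_bh(&tbl_lock);
		goto create_new;
	}
	if (!flag(obj, INACTIVE)) {		/* likely path */
		kref_get(&obj->ref);
		read_unlock_bh(&tbl_lock);
		return obj;
	}
	read_unlock_bh(&tbl_lock);

	write_lock_bh(&tbl_lock);		/* unlikely path */
	if (!flag(obj, HASHED)) {		/* freed in the gap */
		write_unlock_bh(&tbl_lock);
		goto create_new;
	}
	if (!flag(obj, INACTIVE)) {		/* reactivated in the gap */
		write_unlock_bh(&tbl_lock);
		goto retry;
	}
	reactivate(obj);			/* safe: write lock held */
	write_unlock_bh(&tbl_lock);
	return obj;
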
+ * it's inactive, so we can look at or update res_master_nodeid + * without lock_rsb. */ if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) { /* our rsb is not master, and another node has sent us a request; this should never happen */ - log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d", + log_error(ls, "find_rsb inactive from_nodeid %d master %d dir %d", from_nodeid, r->res_master_nodeid, dir_nodeid); dlm_print_rsb(r); + write_unlock_bh(&ls->ls_rsbtbl_lock); error = -ENOTBLK; - goto out_unlock; + goto out; } if (!recover && (r->res_master_nodeid != our_nodeid) && (dir_nodeid == our_nodeid)) { /* our rsb is not master, and we are dir; may as well fix it; this should never happen */ - log_error(ls, "find_rsb toss our %d master %d dir %d", + log_error(ls, "find_rsb inactive our %d master %d dir %d", our_nodeid, r->res_master_nodeid, dir_nodeid); dlm_print_rsb(r); r->res_master_nodeid = our_nodeid; r->res_nodeid = 0; } - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); - goto out_unlock; + del_scan(ls, r); + list_move(&r->res_slow_list, &ls->ls_slow_active); + rsb_clear_flag(r, RSB_INACTIVE); + kref_init(&r->res_ref); + write_unlock_bh(&ls->ls_rsbtbl_lock); + + goto out; do_new: @@ -792,48 +1009,98 @@ static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len, */ error = get_rsb_struct(ls, name, len, &r); - if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - goto retry; - } - if (error) - goto out_unlock; + if (WARN_ON_ONCE(error)) + goto out; r->res_hash = hash; - r->res_bucket = b; r->res_dir_nodeid = dir_nodeid; r->res_master_nodeid = dir_nodeid; r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid; kref_init(&r->res_ref); - error = rsb_insert(r, &ls->ls_rsbtbl[b].keep); - out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + write_lock_bh(&ls->ls_rsbtbl_lock); + error = rsb_insert(r, &ls->ls_rsbtbl); + if (error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry; + } else if (!error) { + list_add(&r->res_slow_list, &ls->ls_slow_active); + } + write_unlock_bh(&ls->ls_rsbtbl_lock); + out: *r_ret = r; return error; } -static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid, - unsigned int flags, struct dlm_rsb **r_ret) +/* + * rsb rcu usage + * + * While rcu read lock is held, the rsb cannot be freed, + * which allows a lookup optimization. + * + * Two threads are accessing the same rsb concurrently, + * the first (A) is trying to use the rsb, the second (B) + * is trying to free the rsb. + * + * thread A thread B + * (trying to use rsb) (trying to free rsb) + * + * A1. rcu read lock + * A2. rsbtbl read lock + * A3. look up rsb in rsbtbl + * A4. rsbtbl read unlock + * B1. rsbtbl write lock + * B2. look up rsb in rsbtbl + * B3. remove rsb from rsbtbl + * B4. clear rsb HASHED flag + * B5. rsbtbl write unlock + * B6. begin freeing rsb using rcu... + * + * (rsb is inactive, so try to make it active again) + * A5. read rsb HASHED flag (safe because rsb is not freed yet) + * A6. the rsb HASHED flag is not set, which it means the rsb + * is being removed from rsbtbl and freed, so don't use it. + * A7. rcu read unlock + * + * B7. ...finish freeing rsb using rcu + * A8. create a new rsb + * + * Without the rcu optimization, steps A5-8 would need to do + * an extra rsbtbl lookup: + * A5. rsbtbl write lock + * A6. 
look up rsb in rsbtbl, not found + * A7. rsbtbl write unlock + * A8. create a new rsb + */ + +static int find_rsb(struct dlm_ls *ls, const void *name, int len, + int from_nodeid, unsigned int flags, + struct dlm_rsb **r_ret) { - uint32_t hash, b; int dir_nodeid; + uint32_t hash; + int rv; if (len > DLM_RESNAME_MAXLEN) return -EINVAL; hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - dir_nodeid = dlm_hash2nodeid(ls, hash); + rcu_read_lock(); if (dlm_no_directory(ls)) - return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid, + rv = find_rsb_nodir(ls, name, len, hash, dir_nodeid, from_nodeid, flags, r_ret); else - return find_rsb_dir(ls, name, len, hash, b, dir_nodeid, - from_nodeid, flags, r_ret); + rv = find_rsb_dir(ls, name, len, hash, dir_nodeid, + from_nodeid, flags, r_ret); + rcu_read_unlock(); + return rv; } /* we have received a request and found that res_master_nodeid != our_nodeid, @@ -879,6 +1146,88 @@ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r, } } +static void __dlm_master_lookup(struct dlm_ls *ls, struct dlm_rsb *r, int our_nodeid, + int from_nodeid, bool is_inactive, unsigned int flags, + int *r_nodeid, int *result) +{ + int fix_master = (flags & DLM_LU_RECOVER_MASTER); + int from_master = (flags & DLM_LU_RECOVER_DIR); + + if (r->res_dir_nodeid != our_nodeid) { + /* should not happen, but may as well fix it and carry on */ + log_error(ls, "%s res_dir %d our %d %s", __func__, + r->res_dir_nodeid, our_nodeid, r->res_name); + r->res_dir_nodeid = our_nodeid; + } + + if (fix_master && r->res_master_nodeid && dlm_is_removed(ls, r->res_master_nodeid)) { + /* Recovery uses this function to set a new master when + * the previous master failed. Setting NEW_MASTER will + * force dlm_recover_masters to call recover_master on this + * rsb even though the res_nodeid is no longer removed. + */ + + r->res_master_nodeid = from_nodeid; + r->res_nodeid = from_nodeid; + rsb_set_flag(r, RSB_NEW_MASTER); + + if (is_inactive) { + /* I don't think we should ever find it inactive. 
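
[Editor's note] The A/B interleaving spelled out above only works because the rsb's memory outlives its table membership by at least one RCU grace period: thread A may dereference r after B3/B4, but never after B7. A sketch of the two halves of that contract; the kfree_rcu() call and the rcu_head member are assumptions about the free side, which this hunk does not show:

/* reader: thread A, steps A1-A7 */
rcu_read_lock();
r = rhashtable_lookup_fast(&ls->ls_rsbtbl, &key, dlm_rhash_rsb_params);
if (r && !rsb_flag(r, RSB_HASHED))
	r = NULL;		/* being freed: stop using it */
rcu_read_unlock();

/* freer: thread B, steps B6-B7, after unhashing under the write lock */
kfree_rcu(r, rcu_head);		/* assumed: defers the actual free until
				   every current reader (like A) is done */
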
*/ + log_error(ls, "%s fix_master inactive", __func__); + dlm_dump_rsb(r); + } + } + + if (from_master && (r->res_master_nodeid != from_nodeid)) { + /* this will happen if from_nodeid became master during + * a previous recovery cycle, and we aborted the previous + * cycle before recovering this master value + */ + + log_limit(ls, "%s from_master %d master_nodeid %d res_nodeid %d first %x %s", + __func__, from_nodeid, r->res_master_nodeid, + r->res_nodeid, r->res_first_lkid, r->res_name); + + if (r->res_master_nodeid == our_nodeid) { + log_error(ls, "from_master %d our_master", from_nodeid); + dlm_dump_rsb(r); + goto ret_assign; + } + + r->res_master_nodeid = from_nodeid; + r->res_nodeid = from_nodeid; + rsb_set_flag(r, RSB_NEW_MASTER); + } + + if (!r->res_master_nodeid) { + /* this will happen if recovery happens while we're looking + * up the master for this rsb + */ + + log_debug(ls, "%s master 0 to %d first %x %s", __func__, + from_nodeid, r->res_first_lkid, r->res_name); + r->res_master_nodeid = from_nodeid; + r->res_nodeid = from_nodeid; + } + + if (!from_master && !fix_master && + (r->res_master_nodeid == from_nodeid)) { + /* this can happen when the master sends remove, the dir node + * finds the rsb on the active list and ignores the remove, + * and the former master sends a lookup + */ + + log_limit(ls, "%s from master %d flags %x first %x %s", + __func__, from_nodeid, flags, r->res_first_lkid, + r->res_name); + } + + ret_assign: + *r_nodeid = r->res_master_nodeid; + if (result) + *result = DLM_LU_MATCH; +} + /* * We're the dir node for this res and another node wants to know the * master nodeid. During normal operation (non recovery) this is only @@ -908,15 +1257,13 @@ static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r, * . 
dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0) */ -int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len, - unsigned int flags, int *r_nodeid, int *result) +static int _dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, + int len, unsigned int flags, int *r_nodeid, int *result) { struct dlm_rsb *r = NULL; - uint32_t hash, b; - int from_master = (flags & DLM_LU_RECOVER_DIR); - int fix_master = (flags & DLM_LU_RECOVER_MASTER); + uint32_t hash; int our_nodeid = dlm_our_nodeid(); - int dir_nodeid, error, toss_list = 0; + int dir_nodeid, error; if (len > DLM_RESNAME_MAXLEN) return -EINVAL; @@ -928,8 +1275,6 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len, } hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - dir_nodeid = dlm_hash2nodeid(ls, hash); if (dir_nodeid != our_nodeid) { log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d", @@ -940,227 +1285,199 @@ int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len, } retry: - error = pre_rsb_struct(ls); - if (error < 0) - return error; - - spin_lock(&ls->ls_rsbtbl[b].lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (!error) { - /* because the rsb is active, we need to lock_rsb before - checking/changing re_master_nodeid */ - - hold_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); - lock_rsb(r); - goto found; - } - - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (error) goto not_found; - /* because the rsb is inactive (on toss list), it's not refcounted - and lock_rsb is not used, but is protected by the rsbtbl lock */ + /* check if the rsb is active under read lock - likely path */ + read_lock_bh(&ls->ls_rsbtbl_lock); + if (!rsb_flag(r, RSB_HASHED)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto not_found; + } - toss_list = 1; - found: - if (r->res_dir_nodeid != our_nodeid) { - /* should not happen, but may as well fix it and carry on */ - log_error(ls, "dlm_master_lookup res_dir %d our %d %s", - r->res_dir_nodeid, our_nodeid, r->res_name); - r->res_dir_nodeid = our_nodeid; + if (rsb_flag(r, RSB_INACTIVE)) { + read_unlock_bh(&ls->ls_rsbtbl_lock); + goto do_inactive; } - if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) { - /* Recovery uses this function to set a new master when - the previous master failed. Setting NEW_MASTER will - force dlm_recover_masters to call recover_master on this - rsb even though the res_nodeid is no longer removed. */ + /* because the rsb is active, we need to lock_rsb before + * checking/changing re_master_nodeid + */ - r->res_master_nodeid = from_nodeid; - r->res_nodeid = from_nodeid; - rsb_set_flag(r, RSB_NEW_MASTER); + hold_rsb(r); + read_unlock_bh(&ls->ls_rsbtbl_lock); + lock_rsb(r); - if (toss_list) { - /* I don't think we should ever find it on toss list. 
*/ - log_error(ls, "dlm_master_lookup fix_master on toss"); - dlm_dump_rsb(r); - } - } + __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, false, + flags, r_nodeid, result); - if (from_master && (r->res_master_nodeid != from_nodeid)) { - /* this will happen if from_nodeid became master during - a previous recovery cycle, and we aborted the previous - cycle before recovering this master value */ + /* the rsb was active */ + unlock_rsb(r); + put_rsb(r); - log_limit(ls, "dlm_master_lookup from_master %d " - "master_nodeid %d res_nodeid %d first %x %s", - from_nodeid, r->res_master_nodeid, r->res_nodeid, - r->res_first_lkid, r->res_name); + return 0; - if (r->res_master_nodeid == our_nodeid) { - log_error(ls, "from_master %d our_master", from_nodeid); - dlm_dump_rsb(r); - dlm_send_rcom_lookup_dump(r, from_nodeid); - goto out_found; + do_inactive: + /* unlikely path - check if still part of ls_rsbtbl */ + write_lock_bh(&ls->ls_rsbtbl_lock); + + /* see comment in find_rsb_dir */ + if (rsb_flag(r, RSB_HASHED)) { + if (!rsb_flag(r, RSB_INACTIVE)) { + write_unlock_bh(&ls->ls_rsbtbl_lock); + /* something as changed, very unlikely but + * try again + */ + goto retry; } - - r->res_master_nodeid = from_nodeid; - r->res_nodeid = from_nodeid; - rsb_set_flag(r, RSB_NEW_MASTER); + } else { + write_unlock_bh(&ls->ls_rsbtbl_lock); + goto not_found; } - if (!r->res_master_nodeid) { - /* this will happen if recovery happens while we're looking - up the master for this rsb */ + /* because the rsb is inactive, it's not refcounted and lock_rsb + is not used, but is protected by the rsbtbl lock */ - log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s", - from_nodeid, r->res_first_lkid, r->res_name); - r->res_master_nodeid = from_nodeid; - r->res_nodeid = from_nodeid; - } + __dlm_master_lookup(ls, r, our_nodeid, from_nodeid, true, flags, + r_nodeid, result); - if (!from_master && !fix_master && - (r->res_master_nodeid == from_nodeid)) { - /* this can happen when the master sends remove, the dir node - finds the rsb on the keep list and ignores the remove, - and the former master sends a lookup */ + /* A dir record rsb should never be on scan list. + * Except when we are the dir and master node. + * This function should only be called by the dir + * node. 
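
[Editor's note] With the shared logic factored into __dlm_master_lookup(), the only real difference between the two paths above is serialization: an active rsb is pinned with hold_rsb() and serialized by lock_rsb(), while an inactive one is serialized by ls_rsbtbl_lock itself. The caller-visible contract is unchanged; roughly (the comments restate the DLM_LU_* values as used above):

int r_nodeid, result, rv;

rv = dlm_master_lookup(ls, from_nodeid, name, len, 0, &r_nodeid, &result);
if (!rv) {
	/* result == DLM_LU_MATCH: an existing rsb (active or inactive)
	 * answered the lookup; r_nodeid is its res_master_nodeid.
	 * result == DLM_LU_ADD: no rsb existed; a new inactive dir
	 * record was inserted with from_nodeid recorded as master. */
}
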
+ */ + WARN_ON(!list_empty(&r->res_scan_list) && + r->res_master_nodeid != our_nodeid); - log_limit(ls, "dlm_master_lookup from master %d flags %x " - "first %x %s", from_nodeid, flags, - r->res_first_lkid, r->res_name); - } + write_unlock_bh(&ls->ls_rsbtbl_lock); - out_found: - *r_nodeid = r->res_master_nodeid; - if (result) - *result = DLM_LU_MATCH; - - if (toss_list) { - r->res_toss_time = jiffies; - /* the rsb was inactive (on toss list) */ - spin_unlock(&ls->ls_rsbtbl[b].lock); - } else { - /* the rsb was active */ - unlock_rsb(r); - put_rsb(r); - } return 0; not_found: error = get_rsb_struct(ls, name, len, &r); - if (error == -EAGAIN) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - goto retry; - } - if (error) - goto out_unlock; + if (WARN_ON_ONCE(error)) + goto out; r->res_hash = hash; - r->res_bucket = b; r->res_dir_nodeid = our_nodeid; r->res_master_nodeid = from_nodeid; r->res_nodeid = from_nodeid; - kref_init(&r->res_ref); - r->res_toss_time = jiffies; - - error = rsb_insert(r, &ls->ls_rsbtbl[b].toss); - if (error) { + rsb_set_flag(r, RSB_INACTIVE); + + write_lock_bh(&ls->ls_rsbtbl_lock); + error = rsb_insert(r, &ls->ls_rsbtbl); + if (error == -EEXIST) { + /* somebody else was faster and it seems the + * rsb exists now, we do a whole relookup + */ + write_unlock_bh(&ls->ls_rsbtbl_lock); + dlm_free_rsb(r); + goto retry; + } else if (error) { + write_unlock_bh(&ls->ls_rsbtbl_lock); /* should never happen */ dlm_free_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); goto retry; } + list_add(&r->res_slow_list, &ls->ls_slow_inactive); + write_unlock_bh(&ls->ls_rsbtbl_lock); + if (result) *result = DLM_LU_ADD; *r_nodeid = from_nodeid; - error = 0; - out_unlock: - spin_unlock(&ls->ls_rsbtbl[b].lock); + out: return error; } +int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, + int len, unsigned int flags, int *r_nodeid, int *result) +{ + int rv; + rcu_read_lock(); + rv = _dlm_master_lookup(ls, from_nodeid, name, len, flags, r_nodeid, result); + rcu_read_unlock(); + return rv; +} + static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash) { - struct rb_node *n; struct dlm_rsb *r; - int i; - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - if (r->res_hash == hash) - dlm_dump_rsb(r); - } - spin_unlock(&ls->ls_rsbtbl[i].lock); + read_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { + if (r->res_hash == hash) + dlm_dump_rsb(r); } + read_unlock_bh(&ls->ls_rsbtbl_lock); } -void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len) +void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len) { struct dlm_rsb *r = NULL; - uint32_t hash, b; int error; - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - - spin_lock(&ls->ls_rsbtbl[b].lock); - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); + rcu_read_lock(); + error = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); if (!error) - goto out_dump; - - error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (error) goto out; - out_dump: + dlm_dump_rsb(r); out: - spin_unlock(&ls->ls_rsbtbl[b].lock); + rcu_read_unlock(); } -static void toss_rsb(struct kref *kref) +static void deactivate_rsb(struct kref *kref) { struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref); struct dlm_ls *ls = r->res_ls; + int our_nodeid = dlm_our_nodeid(); 
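
[Editor's note] This is the third copy of the same lost-insert-race idiom (find_rsb_dir() and find_rsb_nodir() above do exactly the same): allocate outside the lock, and if rhashtable_insert_fast() reports -EEXIST under the write lock, throw the local copy away and restart the whole lookup, which will now find the winner's rsb. The idiom in isolation, with illustrative helper names:

retry:
	r = ex_lookup(name, len);
	if (r)
		return r;			/* somebody has it already */

	r = ex_alloc(name, len);
	if (!r)
		return ERR_PTR(-ENOMEM);

	write_lock_bh(&tbl_lock);
	rv = rhashtable_insert_fast(&tbl, &r->node, params);
	write_unlock_bh(&tbl_lock);
	if (rv == -EEXIST) {
		ex_free(r);			/* we lost the race */
		goto retry;			/* relookup finds the winner */
	}
	return rv ? ERR_PTR(rv) : r;
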
DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r);); - kref_init(&r->res_ref); - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep); - rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss); - r->res_toss_time = jiffies; - ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK; + rsb_set_flag(r, RSB_INACTIVE); + list_move(&r->res_slow_list, &ls->ls_slow_inactive); + + /* + * When the rsb becomes unused, there are two possibilities: + * 1. Leave the inactive rsb in place (don't remove it). + * 2. Add it to the scan list to be removed. + * + * 1 is done when the rsb is acting as the dir record + * for a remotely mastered rsb. The rsb must be left + * in place as an inactive rsb to act as the dir record. + * + * 2 is done when a) the rsb is not the master and not the + * dir record, b) when the rsb is both the master and the + * dir record, c) when the rsb is master but not dir record. + * + * (If no directory is used, the rsb can always be removed.) + */ + if (dlm_no_directory(ls) || + (r->res_master_nodeid == our_nodeid || + dlm_dir_nodeid(r) != our_nodeid)) + add_scan(ls, r); + if (r->res_lvbptr) { dlm_free_lvb(r->res_lvbptr); r->res_lvbptr = NULL; } } -/* See comment for unhold_lkb */ - -static void unhold_rsb(struct dlm_rsb *r) -{ - int rv; - rv = kref_put(&r->res_ref, toss_rsb); - DLM_ASSERT(!rv, dlm_dump_rsb(r);); -} - -static void kill_rsb(struct kref *kref) +void free_inactive_rsb(struct dlm_rsb *r) { - struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref); - - /* All work is done after the return from kref_put() so we - can release the write_lock before the remove and free. */ + WARN_ON_ONCE(!rsb_flag(r, RSB_INACTIVE)); DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r);); + DLM_ASSERT(list_empty(&r->res_scan_list), dlm_dump_rsb(r);); DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r);); + DLM_ASSERT(list_empty(&r->res_masters_list), dlm_dump_rsb(r);); + + dlm_free_rsb(r); } /* Attaching/detaching lkb's from rsb's is for rsb reference counting. 
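
[Editor's note] The branch guarding add_scan() in deactivate_rsb() above compresses the enumerated cases of its comment into one condition; restated as a predicate it reads as follows (this helper does not exist in the patch, it is purely illustrative):

static bool ex_rsb_removable(struct dlm_ls *ls, struct dlm_rsb *r)
{
	int us = dlm_our_nodeid();

	if (dlm_no_directory(ls))
		return true;	/* no dir records: always removable */
	if (r->res_master_nodeid == us)
		return true;	/* cases b) and c): we are the master */
	if (dlm_dir_nodeid(r) != us)
		return true;	/* case a): neither master nor dir */
	/* dir record for a remotely mastered rsb: case 1, keep it */
	return false;
}
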
@@ -1180,35 +1497,36 @@ static void detach_lkb(struct dlm_lkb *lkb) } } -static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) +static int _create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret, + unsigned long start, unsigned long end) { + struct xa_limit limit; struct dlm_lkb *lkb; int rv; - lkb = dlm_allocate_lkb(ls); + limit.max = end; + limit.min = start; + + lkb = dlm_allocate_lkb(); if (!lkb) return -ENOMEM; + lkb->lkb_last_bast_cb_mode = DLM_LOCK_IV; + lkb->lkb_last_cast_cb_mode = DLM_LOCK_IV; + lkb->lkb_last_cb_mode = DLM_LOCK_IV; lkb->lkb_nodeid = -1; lkb->lkb_grmode = DLM_LOCK_IV; kref_init(&lkb->lkb_ref); INIT_LIST_HEAD(&lkb->lkb_ownqueue); INIT_LIST_HEAD(&lkb->lkb_rsb_lookup); - INIT_LIST_HEAD(&lkb->lkb_time_list); - INIT_LIST_HEAD(&lkb->lkb_cb_list); - mutex_init(&lkb->lkb_cb_mutex); - INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work); - - idr_preload(GFP_NOFS); - spin_lock(&ls->ls_lkbidr_spin); - rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT); - if (rv >= 0) - lkb->lkb_id = rv; - spin_unlock(&ls->ls_lkbidr_spin); - idr_preload_end(); + + write_lock_bh(&ls->ls_lkbxa_lock); + rv = xa_alloc(&ls->ls_lkbxa, &lkb->lkb_id, lkb, limit, GFP_ATOMIC); + write_unlock_bh(&ls->ls_lkbxa_lock); if (rv < 0) { - log_error(ls, "create_lkb idr error %d", rv); + log_error(ls, "create_lkb xa error %d", rv); + dlm_free_lkb(lkb); return rv; } @@ -1216,15 +1534,30 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) return 0; } +static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret) +{ + return _create_lkb(ls, lkb_ret, 1, ULONG_MAX); +} + static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret) { struct dlm_lkb *lkb; - spin_lock(&ls->ls_lkbidr_spin); - lkb = idr_find(&ls->ls_lkbidr, lkid); - if (lkb) - kref_get(&lkb->lkb_ref); - spin_unlock(&ls->ls_lkbidr_spin); + rcu_read_lock(); + lkb = xa_load(&ls->ls_lkbxa, lkid); + if (lkb) { + /* check if lkb is still part of lkbxa under lkbxa_lock as + * the lkb_ref is tight to the lkbxa data structure, see + * __put_lkb(). + */ + read_lock_bh(&ls->ls_lkbxa_lock); + if (kref_read(&lkb->lkb_ref)) + kref_get(&lkb->lkb_ref); + else + lkb = NULL; + read_unlock_bh(&ls->ls_lkbxa_lock); + } + rcu_read_unlock(); *lkb_ret = lkb; return lkb ? 0 : -ENOENT; @@ -1246,11 +1579,13 @@ static void kill_lkb(struct kref *kref) static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) { uint32_t lkid = lkb->lkb_id; + int rv; - spin_lock(&ls->ls_lkbidr_spin); - if (kref_put(&lkb->lkb_ref, kill_lkb)) { - idr_remove(&ls->ls_lkbidr, lkid); - spin_unlock(&ls->ls_lkbidr_spin); + rv = dlm_kref_put_write_lock_bh(&lkb->lkb_ref, kill_lkb, + &ls->ls_lkbxa_lock); + if (rv) { + xa_erase(&ls->ls_lkbxa, lkid); + write_unlock_bh(&ls->ls_lkbxa_lock); detach_lkb(lkb); @@ -1258,11 +1593,9 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb) if (lkb->lkb_lvbptr && is_master_copy(lkb)) dlm_free_lvb(lkb->lkb_lvbptr); dlm_free_lkb(lkb); - return 1; - } else { - spin_unlock(&ls->ls_lkbidr_spin); - return 0; } + + return rv; } int dlm_put_lkb(struct dlm_lkb *lkb) @@ -1284,6 +1617,13 @@ static inline void hold_lkb(struct dlm_lkb *lkb) kref_get(&lkb->lkb_ref); } +static void unhold_lkb_assert(struct kref *kref) +{ + struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref); + + DLM_ASSERT(false, dlm_print_lkb(lkb);); +} + /* This is called when we need to remove a reference and are certain it's not the last ref. e.g. 
del_lkb is always called between a find_lkb/put_lkb and is always the inverse of a previous add_lkb. @@ -1291,21 +1631,23 @@ static inline void hold_lkb(struct dlm_lkb *lkb) static inline void unhold_lkb(struct dlm_lkb *lkb) { - int rv; - rv = kref_put(&lkb->lkb_ref, kill_lkb); - DLM_ASSERT(!rv, dlm_print_lkb(lkb);); + kref_put(&lkb->lkb_ref, unhold_lkb_assert); } static void lkb_add_ordered(struct list_head *new, struct list_head *head, int mode) { - struct dlm_lkb *lkb = NULL; + struct dlm_lkb *lkb = NULL, *iter; - list_for_each_entry(lkb, head, lkb_statequeue) - if (lkb->lkb_rqmode < mode) + list_for_each_entry(iter, head, lkb_statequeue) + if (iter->lkb_rqmode < mode) { + lkb = iter; + list_add_tail(new, &iter->lkb_statequeue); break; + } - __list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue); + if (!lkb) + list_add_tail(new, head); } /* add/remove lkb to rsb's grant/convert/wait queue */ @@ -1353,10 +1695,8 @@ static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb) static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts) { - hold_lkb(lkb); del_lkb(r, lkb); add_lkb(r, lkb, sts); - unhold_lkb(lkb); } static int msg_reply_type(int mstype) @@ -1376,102 +1716,28 @@ static int msg_reply_type(int mstype) return -1; } -static int nodeid_warned(int nodeid, int num_nodes, int *warned) -{ - int i; - - for (i = 0; i < num_nodes; i++) { - if (!warned[i]) { - warned[i] = nodeid; - return 0; - } - if (warned[i] == nodeid) - return 1; - } - return 0; -} - -void dlm_scan_waiters(struct dlm_ls *ls) -{ - struct dlm_lkb *lkb; - ktime_t zero = ktime_set(0, 0); - s64 us; - s64 debug_maxus = 0; - u32 debug_scanned = 0; - u32 debug_expired = 0; - int num_nodes = 0; - int *warned = NULL; - - if (!dlm_config.ci_waitwarn_us) - return; - - mutex_lock(&ls->ls_waiters_mutex); - - list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { - if (ktime_equal(lkb->lkb_wait_time, zero)) - continue; - - debug_scanned++; - - us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time)); - - if (us < dlm_config.ci_waitwarn_us) - continue; - - lkb->lkb_wait_time = zero; - - debug_expired++; - if (us > debug_maxus) - debug_maxus = us; - - if (!num_nodes) { - num_nodes = ls->ls_num_nodes; - warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL); - } - if (!warned) - continue; - if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned)) - continue; - - log_error(ls, "waitwarn %x %lld %d us check connection to " - "node %d", lkb->lkb_id, (long long)us, - dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid); - } - mutex_unlock(&ls->ls_waiters_mutex); - kfree(warned); - - if (debug_expired) - log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us", - debug_scanned, debug_expired, - dlm_config.ci_waitwarn_us, (long long)debug_maxus); -} - /* add/remove lkb from global waiters list of lkb's waiting for a reply from a remote node */ -static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) +static void add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; - int error = 0; - - mutex_lock(&ls->ls_waiters_mutex); - - if (is_overlap_unlock(lkb) || - (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) { - error = -EINVAL; - goto out; - } + spin_lock_bh(&ls->ls_waiters_lock); if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) { switch (mstype) { case DLM_MSG_UNLOCK: - lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; + set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); break; case DLM_MSG_CANCEL: - lkb->lkb_flags |= 
DLM_IFL_OVERLAP_CANCEL; + set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); break; default: - error = -EBUSY; + /* should never happen as validate_lock_args() checks + * on lkb_wait_type and validate_unlock_args() only + * creates UNLOCK or CANCEL messages. + */ + WARN_ON_ONCE(1); goto out; } lkb->lkb_wait_count++; @@ -1479,7 +1745,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) log_debug(ls, "addwait %x cur %d overlap %d count %d f %x", lkb->lkb_id, lkb->lkb_wait_type, mstype, - lkb->lkb_wait_count, lkb->lkb_flags); + lkb->lkb_wait_count, dlm_iflags_val(lkb)); goto out; } @@ -1489,17 +1755,11 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) lkb->lkb_wait_count++; lkb->lkb_wait_type = mstype; - lkb->lkb_wait_time = ktime_get(); lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */ hold_lkb(lkb); list_add(&lkb->lkb_wait_reply, &ls->ls_waiters); out: - if (error) - log_error(ls, "addwait error %x %d flags %x %d %d %s", - lkb->lkb_id, error, lkb->lkb_flags, mstype, - lkb->lkb_wait_type, lkb->lkb_resource->res_name); - mutex_unlock(&ls->ls_waiters_mutex); - return error; + spin_unlock_bh(&ls->ls_waiters_lock); } /* We clear the RESEND flag because we might be taking an lkb off the waiters @@ -1508,21 +1768,21 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid) set RESEND and dlm_recover_waiters_post() */ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, - struct dlm_message *ms) + const struct dlm_message *ms) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int overlap_done = 0; - if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) { + if (mstype == DLM_MSG_UNLOCK_REPLY && + test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id); - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; overlap_done = 1; goto out_del; } - if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) { + if (mstype == DLM_MSG_CANCEL_REPLY && + test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id); - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; overlap_done = 1; goto out_del; } @@ -1546,13 +1806,13 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, lingering state of the cancel and fail with -EBUSY. */ if ((mstype == DLM_MSG_CONVERT_REPLY) && - (lkb->lkb_wait_type == DLM_MSG_CONVERT) && - is_overlap_cancel(lkb) && ms && !ms->m_result) { + (lkb->lkb_wait_type == DLM_MSG_CONVERT) && ms && !ms->m_result && + test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags)) { log_debug(ls, "remwait %x convert_reply zap overlap_cancel", lkb->lkb_id); lkb->lkb_wait_type = 0; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; lkb->lkb_wait_count--; + unhold_lkb(lkb); goto out_del; } @@ -1565,8 +1825,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, } log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait", - lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid, - mstype, lkb->lkb_flags); + lkb->lkb_id, ms ? 
le32_to_cpu(ms->m_header.h_nodeid) : 0, + lkb->lkb_remid, mstype, dlm_iflags_val(lkb)); return -1; out_del: @@ -1579,12 +1839,13 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype, log_error(ls, "remwait error %x reply %d wait_type %d overlap", lkb->lkb_id, mstype, lkb->lkb_wait_type); lkb->lkb_wait_count--; + unhold_lkb(lkb); lkb->lkb_wait_type = 0; } DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb);); - lkb->lkb_flags &= ~DLM_IFL_RESEND; + clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); lkb->lkb_wait_count--; if (!lkb->lkb_wait_count) list_del_init(&lkb->lkb_wait_reply); @@ -1597,349 +1858,34 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype) struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; - mutex_lock(&ls->ls_waiters_mutex); + spin_lock_bh(&ls->ls_waiters_lock); error = _remove_from_waiters(lkb, mstype, NULL); - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock_bh(&ls->ls_waiters_lock); return error; } -/* Handles situations where we might be processing a "fake" or "stub" reply in - which we can't try to take waiters_mutex again. */ +/* Handles situations where we might be processing a "fake" or "local" reply in + * the recovery context which stops any locking activity. Only debugfs might + * change the lockspace waiters but they will held the recovery lock to ensure + * remove_from_waiters_ms() in local case will be the only user manipulating the + * lockspace waiters in recovery context. + */ -static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms) +static int remove_from_waiters_ms(struct dlm_lkb *lkb, + const struct dlm_message *ms, bool local) { struct dlm_ls *ls = lkb->lkb_resource->res_ls; int error; - if (ms->m_flags != DLM_IFL_STUB_MS) - mutex_lock(&ls->ls_waiters_mutex); - error = _remove_from_waiters(lkb, ms->m_type, ms); - if (ms->m_flags != DLM_IFL_STUB_MS) - mutex_unlock(&ls->ls_waiters_mutex); - return error; -} - -/* If there's an rsb for the same resource being removed, ensure - that the remove message is sent before the new lookup message. - It should be rare to need a delay here, but if not, then it may - be worthwhile to add a proper wait mechanism rather than a delay. */ - -static void wait_pending_remove(struct dlm_rsb *r) -{ - struct dlm_ls *ls = r->res_ls; - restart: - spin_lock(&ls->ls_remove_spin); - if (ls->ls_remove_len && - !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) { - log_debug(ls, "delay lookup for remove dir %d %s", - r->res_dir_nodeid, r->res_name); - spin_unlock(&ls->ls_remove_spin); - msleep(1); - goto restart; - } - spin_unlock(&ls->ls_remove_spin); -} - -/* - * ls_remove_spin protects ls_remove_name and ls_remove_len which are - * read by other threads in wait_pending_remove. ls_remove_names - * and ls_remove_lens are only used by the scan thread, so they do - * not need protection. 
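
[Editor's note] Back in the create_lkb()/find_lkb() hunk above, the idr became an xarray and the lookup grew a liveness check: xa_load() under rcu_read_lock() may return an lkb whose final kref_put() is racing with us, so find_lkb() re-reads the refcount under ls_lkbxa_lock, the same lock __put_lkb() holds while erasing the id, and treats zero as "already dead". Condensed from the patch, minus error handling:

static struct dlm_lkb *ex_find_lkb(struct dlm_ls *ls, uint32_t lkid)
{
	struct dlm_lkb *lkb;

	rcu_read_lock();
	lkb = xa_load(&ls->ls_lkbxa, lkid);
	if (lkb) {
		read_lock_bh(&ls->ls_lkbxa_lock);
		if (kref_read(&lkb->lkb_ref))
			kref_get(&lkb->lkb_ref);	/* still live */
		else
			lkb = NULL;			/* being erased */
		read_unlock_bh(&ls->ls_lkbxa_lock);
	}
	rcu_read_unlock();

	return lkb;
}
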
- */ - -static void shrink_bucket(struct dlm_ls *ls, int b) -{ - struct rb_node *n, *next; - struct dlm_rsb *r; - char *name; - int our_nodeid = dlm_our_nodeid(); - int remote_count = 0; - int need_shrink = 0; - int i, len, rv; - - memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX); - - spin_lock(&ls->ls_rsbtbl[b].lock); - - if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - return; - } - - for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) { - next = rb_next(n); - r = rb_entry(n, struct dlm_rsb, res_hashnode); - - /* If we're the directory record for this rsb, and - we're not the master of it, then we need to wait - for the master node to send us a dir remove for - before removing the dir record. */ - - if (!dlm_no_directory(ls) && - (r->res_master_nodeid != our_nodeid) && - (dlm_dir_nodeid(r) == our_nodeid)) { - continue; - } - - need_shrink = 1; - - if (!time_after_eq(jiffies, r->res_toss_time + - dlm_config.ci_toss_secs * HZ)) { - continue; - } - - if (!dlm_no_directory(ls) && - (r->res_master_nodeid == our_nodeid) && - (dlm_dir_nodeid(r) != our_nodeid)) { - - /* We're the master of this rsb but we're not - the directory record, so we need to tell the - dir node to remove the dir record. */ - - ls->ls_remove_lens[remote_count] = r->res_length; - memcpy(ls->ls_remove_names[remote_count], r->res_name, - DLM_RESNAME_MAXLEN); - remote_count++; - - if (remote_count >= DLM_REMOVE_NAMES_MAX) - break; - continue; - } - - if (!kref_put(&r->res_ref, kill_rsb)) { - log_error(ls, "tossed rsb in use %s", r->res_name); - continue; - } - - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - dlm_free_rsb(r); - } - - if (need_shrink) - ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK; + if (!local) + spin_lock_bh(&ls->ls_waiters_lock); else - ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK; - spin_unlock(&ls->ls_rsbtbl[b].lock); - - /* - * While searching for rsb's to free, we found some that require - * remote removal. We leave them in place and find them again here - * so there is a very small gap between removing them from the toss - * list and sending the removal. Keeping this gap small is - * important to keep us (the master node) from being out of sync - * with the remote dir node for very long. - * - * From the time the rsb is removed from toss until just after - * send_remove, the rsb name is saved in ls_remove_name. A new - * lookup checks this to ensure that a new lookup message for the - * same resource name is not sent just before the remove message. 
- */ - - for (i = 0; i < remote_count; i++) { - name = ls->ls_remove_names[i]; - len = ls->ls_remove_lens[i]; - - spin_lock(&ls->ls_rsbtbl[b].lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_debug(ls, "remove_name not toss %s", name); - continue; - } - - if (r->res_master_nodeid != our_nodeid) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_debug(ls, "remove_name master %d dir %d our %d %s", - r->res_master_nodeid, r->res_dir_nodeid, - our_nodeid, name); - continue; - } - - if (r->res_dir_nodeid == our_nodeid) { - /* should never happen */ - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_error(ls, "remove_name dir %d master %d our %d %s", - r->res_dir_nodeid, r->res_master_nodeid, - our_nodeid, name); - continue; - } - - if (!time_after_eq(jiffies, r->res_toss_time + - dlm_config.ci_toss_secs * HZ)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_debug(ls, "remove_name toss_time %lu now %lu %s", - r->res_toss_time, jiffies, name); - continue; - } - - if (!kref_put(&r->res_ref, kill_rsb)) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_error(ls, "remove_name in use %s", name); - continue; - } - - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - - /* block lookup of same name until we've sent remove */ - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = len; - memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); - spin_unlock(&ls->ls_rsbtbl[b].lock); - - send_remove(r); - - /* allow lookup of name again */ - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = 0; - memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); - - dlm_free_rsb(r); - } -} - -void dlm_scan_rsbs(struct dlm_ls *ls) -{ - int i; - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - shrink_bucket(ls, i); - if (dlm_locking_stopped(ls)) - break; - cond_resched(); - } -} - -static void add_timeout(struct dlm_lkb *lkb) -{ - struct dlm_ls *ls = lkb->lkb_resource->res_ls; - - if (is_master_copy(lkb)) - return; - - if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) && - !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { - lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN; - goto add_it; - } - if (lkb->lkb_exflags & DLM_LKF_TIMEOUT) - goto add_it; - return; - - add_it: - DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb);); - mutex_lock(&ls->ls_timeout_mutex); - hold_lkb(lkb); - list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout); - mutex_unlock(&ls->ls_timeout_mutex); -} - -static void del_timeout(struct dlm_lkb *lkb) -{ - struct dlm_ls *ls = lkb->lkb_resource->res_ls; - - mutex_lock(&ls->ls_timeout_mutex); - if (!list_empty(&lkb->lkb_time_list)) { - list_del_init(&lkb->lkb_time_list); - unhold_lkb(lkb); - } - mutex_unlock(&ls->ls_timeout_mutex); -} - -/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and - lkb_lksb_timeout without lock_rsb? Note: we can't lock timeout_mutex - and then lock rsb because of lock ordering in add_timeout. We may need - to specify some special timeout-related bits in the lkb that are just to - be accessed under the timeout_mutex. 
*/ - -void dlm_scan_timeout(struct dlm_ls *ls) -{ - struct dlm_rsb *r; - struct dlm_lkb *lkb; - int do_cancel, do_warn; - s64 wait_us; - - for (;;) { - if (dlm_locking_stopped(ls)) - break; - - do_cancel = 0; - do_warn = 0; - mutex_lock(&ls->ls_timeout_mutex); - list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) { - - wait_us = ktime_to_us(ktime_sub(ktime_get(), - lkb->lkb_timestamp)); - - if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) && - wait_us >= (lkb->lkb_timeout_cs * 10000)) - do_cancel = 1; - - if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) && - wait_us >= dlm_config.ci_timewarn_cs * 10000) - do_warn = 1; - - if (!do_cancel && !do_warn) - continue; - hold_lkb(lkb); - break; - } - mutex_unlock(&ls->ls_timeout_mutex); - - if (!do_cancel && !do_warn) - break; - - r = lkb->lkb_resource; - hold_rsb(r); - lock_rsb(r); - - if (do_warn) { - /* clear flag so we only warn once */ - lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN; - if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT)) - del_timeout(lkb); - dlm_timeout_warn(lkb); - } - - if (do_cancel) { - log_debug(ls, "timeout cancel %x node %d %s", - lkb->lkb_id, lkb->lkb_nodeid, r->res_name); - lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN; - lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL; - del_timeout(lkb); - _cancel_lock(r, lkb); - } - - unlock_rsb(r); - unhold_rsb(r); - dlm_put_lkb(lkb); - } -} - -/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping - dlm_recoverd before checking/setting ls_recover_begin. */ - -void dlm_adjust_timeouts(struct dlm_ls *ls) -{ - struct dlm_lkb *lkb; - u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin); - - ls->ls_recover_begin = 0; - mutex_lock(&ls->ls_timeout_mutex); - list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) - lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us); - mutex_unlock(&ls->ls_timeout_mutex); - - if (!dlm_config.ci_waitwarn_us) - return; - - mutex_lock(&ls->ls_waiters_mutex); - list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { - if (ktime_to_us(lkb->lkb_wait_time)) - lkb->lkb_wait_time = ktime_get(); - } - mutex_unlock(&ls->ls_waiters_mutex); + WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) || + !dlm_locking_stopped(ls)); + error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms); + if (!local) + spin_unlock_bh(&ls->ls_waiters_lock); + return error; } /* lkb is master or local copy */ @@ -1992,7 +1938,7 @@ static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) } if (rsb_flag(r, RSB_VALNOTVALID)) - lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID; + set_bit(DLM_SBF_VALNOTVALID_BIT, &lkb->lkb_sbflags); } static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) @@ -2025,7 +1971,7 @@ static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb) /* lkb is process copy (pc) */ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { int b; @@ -2041,7 +1987,7 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, if (len > r->res_ls->ls_lvblen) len = r->res_ls->ls_lvblen; memcpy(lkb->lkb_lvbptr, ms->m_extra, len); - lkb->lkb_lvbseq = ms->m_lvbseq; + lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); } } @@ -2132,7 +2078,7 @@ static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) } static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { set_lvb_lock_pc(r, lkb, ms); _grant_lock(r, lkb); @@ -2170,12 +2116,12 @@ static void munge_demoted(struct dlm_lkb *lkb) lkb->lkb_grmode = 
DLM_LOCK_NL; } -static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms) +static void munge_altmode(struct dlm_lkb *lkb, const struct dlm_message *ms) { - if (ms->m_type != DLM_MSG_REQUEST_REPLY && - ms->m_type != DLM_MSG_GRANT) { + if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) && + ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) { log_print("munge_altmode %x invalid reply type %d", - lkb->lkb_id, ms->m_type); + lkb->lkb_id, le32_to_cpu(ms->m_type)); return; } @@ -2464,15 +2410,13 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, conversion_deadlock_detect(r, lkb)) { if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) { lkb->lkb_grmode = DLM_LOCK_NL; - lkb->lkb_sbflags |= DLM_SBF_DEMOTED; - } else if (!(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { - if (err) - *err = -EDEADLK; - else { - log_print("can_be_granted deadlock %x now %d", - lkb->lkb_id, now); - dlm_dump_rsb(r); - } + set_bit(DLM_SBF_DEMOTED_BIT, &lkb->lkb_sbflags); + } else if (err) { + *err = -EDEADLK; + } else { + log_print("can_be_granted deadlock %x now %d", + lkb->lkb_id, now); + dlm_dump_rsb(r); } goto out; } @@ -2493,7 +2437,7 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, lkb->lkb_rqmode = alt; rv = _can_be_granted(r, lkb, now, 0); if (rv) - lkb->lkb_sbflags |= DLM_SBF_ALTMODE; + set_bit(DLM_SBF_ALTMODE_BIT, &lkb->lkb_sbflags); else lkb->lkb_rqmode = rqmode; } @@ -2501,13 +2445,6 @@ static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now, return rv; } -/* FIXME: I don't think that can_be_granted() can/will demote or find deadlock - for locks pending on the convert list. Once verified (watch for these - log_prints), we should be able to just call _can_be_granted() and not - bother with the demote/deadlk cases here (and there's no easy way to deal - with a deadlk here, we'd have to generate something like grant_lock with - the deadlk error.) */ - /* Returns the highest requested mode of all blocked conversions; sets cw if there's a blocked conversion to DLM_LOCK_CW. */ @@ -2545,9 +2482,22 @@ static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw, } if (deadlk) { - log_print("WARN: pending deadlock %x node %d %s", - lkb->lkb_id, lkb->lkb_nodeid, r->res_name); - dlm_dump_rsb(r); + /* + * If DLM_LKB_NODLKWT flag is set and conversion + * deadlock is detected, we request blocking AST and + * down (or cancel) conversion. 
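
[Editor's note] For DLM_LKF_NODLCKWT users the observable change in the hunk above is that a conversion caught in a deadlock cycle is no longer merely logged: the lock now receives a blocking AST (throttled by lkb_highbast so each mode escalation fires once) and is expected to break the cycle itself, typically by demoting. A hedged sketch of a kernel caller's bast handler reacting to that; the deferral is conventional (in-tree users push work to process context rather than re-entering the lock API from the callback), and the types are illustrative:

#include <linux/workqueue.h>

struct ex_lock {
	struct work_struct demote_work;
	int demote_to;
};

/* bast arrives with the mode the blocked conversion needs */
static void ex_bast(void *astarg, int mode)
{
	struct ex_lock *l = astarg;

	l->demote_to = mode;
	schedule_work(&l->demote_work);	/* down-convert from process
					   context, breaking the cycle */
}
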
+ */ + if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) { + if (lkb->lkb_highbast < lkb->lkb_rqmode) { + queue_bast(r, lkb, lkb->lkb_rqmode); + lkb->lkb_highbast = lkb->lkb_rqmode; + } + } else { + log_print("WARN: pending deadlock %x node %d %s", + lkb->lkb_id, lkb->lkb_nodeid, + r->res_name); + dlm_dump_rsb(r); + } continue; } @@ -2744,8 +2694,6 @@ static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb) return 0; } - wait_pending_remove(r); - r->res_first_lkid = lkb->lkb_id; send_lookup(r, lkb); return 1; @@ -2758,7 +2706,6 @@ static void process_lookup_list(struct dlm_rsb *r) list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) { list_del_init(&lkb->lkb_rsb_lookup); _request_lock(r, lkb); - schedule(); } } @@ -2802,10 +2749,9 @@ static void confirm_master(struct dlm_rsb *r, int error) } static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, - int namelen, unsigned long timeout_cs, - void (*ast) (void *astparam), + int namelen, void (*ast)(void *astparam), void *astparam, - void (*bast) (void *astparam, int mode), + void (*bast)(void *astparam, int mode), struct dlm_args *args) { int rv = -EINVAL; @@ -2859,7 +2805,6 @@ static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags, args->astfn = ast; args->astparam = astparam; args->bastfn = bast; - args->timeout = timeout_cs; args->mode = mode; args->lksb = lksb; rv = 0; @@ -2884,29 +2829,30 @@ static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args) static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, struct dlm_args *args) { - int rv = -EINVAL; + int rv = -EBUSY; if (args->flags & DLM_LKF_CONVERT) { - if (lkb->lkb_flags & DLM_IFL_MSTCPY) + if (lkb->lkb_status != DLM_LKSTS_GRANTED) goto out; - if (args->flags & DLM_LKF_QUECVT && - !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) + /* lock not allowed if there's any op in progress */ + if (lkb->lkb_wait_type || lkb->lkb_wait_count) goto out; - rv = -EBUSY; - if (lkb->lkb_status != DLM_LKSTS_GRANTED) + if (is_overlap(lkb)) goto out; - if (lkb->lkb_wait_type) + rv = -EINVAL; + if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) goto out; - if (is_overlap(lkb)) + if (args->flags & DLM_LKF_QUECVT && + !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1]) goto out; } lkb->lkb_exflags = args->flags; - lkb->lkb_sbflags = 0; + dlm_set_sbflags_val(lkb, 0); lkb->lkb_astfn = args->astfn; lkb->lkb_astparam = args->astparam; lkb->lkb_bastfn = args->bastfn; @@ -2914,14 +2860,25 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, lkb->lkb_lksb = args->lksb; lkb->lkb_lvbptr = args->lksb->sb_lvbptr; lkb->lkb_ownpid = (int) current->pid; - lkb->lkb_timeout_cs = args->timeout; rv = 0; out: - if (rv) - log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s", - rv, lkb->lkb_id, lkb->lkb_flags, args->flags, - lkb->lkb_status, lkb->lkb_wait_type, - lkb->lkb_resource->res_name); + switch (rv) { + case 0: + break; + case -EINVAL: + /* annoy the user because dlm usage is wrong */ + WARN_ON(1); + log_error(ls, "%s %d %x %x %x %d %d", __func__, + rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, + lkb->lkb_status, lkb->lkb_wait_type); + break; + default: + log_debug(ls, "%s %d %x %x %x %d %d", __func__, + rv, lkb->lkb_id, dlm_iflags_val(lkb), args->flags, + lkb->lkb_status, lkb->lkb_wait_type); + break; + } + return rv; } @@ -2935,23 +2892,12 @@ static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) { struct 
dlm_ls *ls = lkb->lkb_resource->res_ls; - int rv = -EINVAL; + int rv = -EBUSY; - if (lkb->lkb_flags & DLM_IFL_MSTCPY) { - log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); - dlm_print_lkb(lkb); - goto out; - } - - /* an lkb may still exist even though the lock is EOL'ed due to a - cancel, unlock or failed noqueue request; an app can't use these - locks; return same error as if the lkid had not been found at all */ - - if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) { - log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); - rv = -ENOENT; + /* normal unlock not allowed if there's any op in progress */ + if (!(args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) && + (lkb->lkb_wait_type || lkb->lkb_wait_count)) goto out; - } /* an lkb may be waiting for an rsb lookup to complete where the lookup was initiated by another lock */ @@ -2966,24 +2912,41 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) unhold_lkb(lkb); /* undoes create_lkb() */ } /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */ - rv = -EBUSY; goto out; } + rv = -EINVAL; + if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { + log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id); + dlm_print_lkb(lkb); + goto out; + } + + /* an lkb may still exist even though the lock is EOL'ed due to a + * cancel, unlock or failed noqueue request; an app can't use these + * locks; return same error as if the lkid had not been found at all + */ + + if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { + log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id); + rv = -ENOENT; + goto out; + } + + if (is_overlap_unlock(lkb)) + goto out; + /* cancel not allowed with another cancel/unlock in progress */ if (args->flags & DLM_LKF_CANCEL) { if (lkb->lkb_exflags & DLM_LKF_CANCEL) goto out; - if (is_overlap(lkb)) + if (is_overlap_cancel(lkb)) goto out; - /* don't let scand try to do a cancel */ - del_timeout(lkb); - - if (lkb->lkb_flags & DLM_IFL_RESEND) { - lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL; + if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { + set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; } @@ -2998,7 +2961,7 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) switch (lkb->lkb_wait_type) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: - lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL; + set_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; case DLM_MSG_UNLOCK: @@ -3017,14 +2980,8 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK) goto out; - if (is_overlap_unlock(lkb)) - goto out; - - /* don't let scand try to do a cancel */ - del_timeout(lkb); - - if (lkb->lkb_flags & DLM_IFL_RESEND) { - lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; + if (test_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags)) { + set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; } @@ -3032,33 +2989,41 @@ static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args) switch (lkb->lkb_wait_type) { case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: - lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK; + set_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); rv = -EBUSY; goto out; case DLM_MSG_UNLOCK: goto out; } /* add_to_waiters() will set OVERLAP_UNLOCK */ - goto out_ok; } - /* normal unlock not allowed if there's any op in progress */ - rv = -EBUSY; - if (lkb->lkb_wait_type || lkb->lkb_wait_count) - goto out; - out_ok: /* an overlapping op shouldn't blow away exflags from other op */ 
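/*
 * A minimal caller-side sketch (illustration only; "ls" is assumed to be
 * the caller's dlm_lockspace_t handle, "lksb" the lksb of the lock being
 * torn down, and "astarg" the caller's original AST argument): cancel and
 * force-unlock travel through the same dlm_unlock() entry point as a
 * normal unlock, and dlm_unlock() maps the -EBUSY produced here to 0 for
 * these flags, since the overlap is recorded in lkb_iflags and resolved
 * when the outstanding reply arrives. Completion is still delivered
 * through the lock's AST, with sb_status set to -DLM_ECANCEL or
 * -DLM_EUNLOCK.
 *
 *	error = dlm_unlock(ls, lksb.sb_lkid, DLM_LKF_CANCEL, &lksb, astarg);
 *	if (error)
 *		log_print("cancel of %x failed %d", lksb.sb_lkid, error);
 */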
lkb->lkb_exflags |= args->flags; - lkb->lkb_sbflags = 0; + dlm_set_sbflags_val(lkb, 0); lkb->lkb_astparam = args->astparam; rv = 0; out: - if (rv) - log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv, - lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags, + switch (rv) { + case 0: + break; + case -EINVAL: + /* annoy the user because dlm usage is wrong */ + WARN_ON(1); + log_error(ls, "%s %d %x %x %x %x %d %s", __func__, rv, + lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, args->flags, lkb->lkb_wait_type, lkb->lkb_resource->res_name); + break; + default: + log_debug(ls, "%s %d %x %x %x %x %d %s", __func__, rv, + lkb->lkb_id, dlm_iflags_val(lkb), lkb->lkb_exflags, + args->flags, lkb->lkb_wait_type, + lkb->lkb_resource->res_name); + break; + } + return rv; } @@ -3082,7 +3047,6 @@ static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb) if (can_be_queued(lkb)) { error = -EINPROGRESS; add_lkb(r, lkb, DLM_LKSTS_WAITING); - add_timeout(lkb); goto out; } @@ -3123,7 +3087,7 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) deadlock, so we leave it on the granted queue and return EDEADLK in the ast for the convert. */ - if (deadlk) { + if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) { /* it's left on the granted queue */ revert_lock(r, lkb); queue_cast(r, lkb, -EDEADLK); @@ -3151,7 +3115,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) error = -EINPROGRESS; del_lkb(r, lkb); add_lkb(r, lkb, DLM_LKSTS_CONVERT); - add_timeout(lkb); goto out; } @@ -3309,8 +3272,9 @@ static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) * request_lock(), convert_lock(), unlock_lock(), cancel_lock() */ -static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name, - int len, struct dlm_args *args) +static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, + const void *name, int len, + struct dlm_args *args) { struct dlm_rsb *r; int error; @@ -3409,7 +3373,7 @@ int dlm_lock(dlm_lockspace_t *lockspace, int mode, struct dlm_lksb *lksb, uint32_t flags, - void *name, + const void *name, unsigned int namelen, uint32_t parent_lkid, void (*ast) (void *astarg), @@ -3435,8 +3399,10 @@ int dlm_lock(dlm_lockspace_t *lockspace, if (error) goto out; - error = set_lock_args(mode, lksb, flags, namelen, 0, ast, - astarg, bast, &args); + trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags); + + error = set_lock_args(mode, lksb, flags, namelen, ast, astarg, bast, + &args); if (error) goto out_put; @@ -3448,6 +3414,8 @@ int dlm_lock(dlm_lockspace_t *lockspace, if (error == -EINPROGRESS) error = 0; out_put: + trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, true); + if (convert || error) __put_lkb(ls, lkb); if (error == -EAGAIN || error == -EDEADLK) @@ -3479,6 +3447,8 @@ int dlm_unlock(dlm_lockspace_t *lockspace, if (error) goto out; + trace_dlm_unlock_start(ls, lkb, flags); + error = set_unlock_args(flags, astarg, &args); if (error) goto out_put; @@ -3493,6 +3463,8 @@ int dlm_unlock(dlm_lockspace_t *lockspace, if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK))) error = 0; out_put: + trace_dlm_unlock_end(ls, lkb, flags, error); + dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); @@ -3532,24 +3504,22 @@ static int _create_message(struct dlm_ls *ls, int mb_len, char *mb; /* get_buffer gives us a message handle (mh) that we need to - pass into lowcomms_commit and a message buffer (mb) that we + pass into midcomms_commit and a message buffer (mb) that we write our data into */ - mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, 
GFP_NOFS, &mb); + mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb); if (!mh) return -ENOBUFS; - memset(mb, 0, mb_len); - ms = (struct dlm_message *) mb; - ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR); - ms->m_header.h_lockspace = ls->ls_global_id; - ms->m_header.h_nodeid = dlm_our_nodeid(); - ms->m_header.h_length = mb_len; + ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); + ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); + ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); + ms->m_header.h_length = cpu_to_le16(mb_len); ms->m_header.h_cmd = DLM_MSG; - ms->m_type = mstype; + ms->m_type = cpu_to_le32(mstype); *mh_ret = mh; *ms_ret = ms; @@ -3574,7 +3544,7 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, case DLM_MSG_REQUEST_REPLY: case DLM_MSG_CONVERT_REPLY: case DLM_MSG_GRANT: - if (lkb && lkb->lkb_lvbptr) + if (lkb && lkb->lkb_lvbptr && (lkb->lkb_exflags & DLM_LKF_VALBLK)) mb_len += r->res_ls->ls_lvblen; break; } @@ -3586,51 +3556,51 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb, /* further lowcomms enhancements or alternate implementations may make the return value from this function useful at some point */ -static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms) +static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms, + const void *name, int namelen) { - dlm_message_out(ms); - dlm_lowcomms_commit_buffer(mh); + dlm_midcomms_commit_mhandle(mh, name, namelen); return 0; } static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb, struct dlm_message *ms) { - ms->m_nodeid = lkb->lkb_nodeid; - ms->m_pid = lkb->lkb_ownpid; - ms->m_lkid = lkb->lkb_id; - ms->m_remid = lkb->lkb_remid; - ms->m_exflags = lkb->lkb_exflags; - ms->m_sbflags = lkb->lkb_sbflags; - ms->m_flags = lkb->lkb_flags; - ms->m_lvbseq = lkb->lkb_lvbseq; - ms->m_status = lkb->lkb_status; - ms->m_grmode = lkb->lkb_grmode; - ms->m_rqmode = lkb->lkb_rqmode; - ms->m_hash = r->res_hash; + ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid); + ms->m_pid = cpu_to_le32(lkb->lkb_ownpid); + ms->m_lkid = cpu_to_le32(lkb->lkb_id); + ms->m_remid = cpu_to_le32(lkb->lkb_remid); + ms->m_exflags = cpu_to_le32(lkb->lkb_exflags); + ms->m_sbflags = cpu_to_le32(dlm_sbflags_val(lkb)); + ms->m_flags = cpu_to_le32(dlm_dflags_val(lkb)); + ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); + ms->m_status = cpu_to_le32(lkb->lkb_status); + ms->m_grmode = cpu_to_le32(lkb->lkb_grmode); + ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode); + ms->m_hash = cpu_to_le32(r->res_hash); /* m_result and m_bastmode are set from function args, not from lkb fields */ if (lkb->lkb_bastfn) - ms->m_asts |= DLM_CB_BAST; + ms->m_asts |= cpu_to_le32(DLM_CB_BAST); if (lkb->lkb_astfn) - ms->m_asts |= DLM_CB_CAST; + ms->m_asts |= cpu_to_le32(DLM_CB_CAST); /* compare with switch in create_message; send_remove() doesn't use send_args() */ switch (ms->m_type) { - case DLM_MSG_REQUEST: - case DLM_MSG_LOOKUP: + case cpu_to_le32(DLM_MSG_REQUEST): + case cpu_to_le32(DLM_MSG_LOOKUP): memcpy(ms->m_extra, r->res_name, r->res_length); break; - case DLM_MSG_CONVERT: - case DLM_MSG_UNLOCK: - case DLM_MSG_REQUEST_REPLY: - case DLM_MSG_CONVERT_REPLY: - case DLM_MSG_GRANT: - if (!lkb->lkb_lvbptr) + case cpu_to_le32(DLM_MSG_CONVERT): + case cpu_to_le32(DLM_MSG_UNLOCK): + case cpu_to_le32(DLM_MSG_REQUEST_REPLY): + case cpu_to_le32(DLM_MSG_CONVERT_REPLY): + case cpu_to_le32(DLM_MSG_GRANT): + if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK)) break; memcpy(ms->m_extra, 
lkb->lkb_lvbptr, r->res_ls->ls_lvblen); break; @@ -3645,17 +3615,14 @@ static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype) to_nodeid = r->res_nodeid; - error = add_to_waiters(lkb, mstype, to_nodeid); - if (error) - return error; - + add_to_waiters(lkb, mstype, to_nodeid); error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh); if (error) goto fail; send_args(r, lkb, ms); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; @@ -3679,10 +3646,9 @@ static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb) /* down conversions go without a reply from the master */ if (!error && down_conversion(lkb)) { remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY); - r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS; - r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY; - r->res_ls->ls_stub_ms.m_result = 0; - __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms); + r->res_ls->ls_local_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); + r->res_ls->ls_local_ms.m_result = 0; + __receive_convert_reply(r, lkb, &r->res_ls->ls_local_ms, true); } return error; @@ -3718,7 +3684,7 @@ static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb) ms->m_result = 0; - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3737,9 +3703,9 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode) send_args(r, lkb, ms); - ms->m_bastmode = mode; + ms->m_bastmode = cpu_to_le32(mode); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3752,17 +3718,14 @@ static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb) to_nodeid = dlm_dir_nodeid(r); - error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid); - if (error) - return error; - + add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid); error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh); if (error) goto fail; send_args(r, lkb, ms); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); if (error) goto fail; return 0; @@ -3785,9 +3748,9 @@ static int send_remove(struct dlm_rsb *r) goto out; memcpy(ms->m_extra, r->res_name, r->res_length); - ms->m_hash = r->res_hash; + ms->m_hash = cpu_to_le32(r->res_hash); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3807,9 +3770,9 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, send_args(r, lkb, ms); - ms->m_result = rv; + ms->m_result = cpu_to_le32(to_dlm_errno(rv)); - error = send_message(mh, ms); + error = send_message(mh, ms, r->res_name, r->res_length); out: return error; } @@ -3834,23 +3797,24 @@ static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv) return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv); } -static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in, - int ret_nodeid, int rv) +static int send_lookup_reply(struct dlm_ls *ls, + const struct dlm_message *ms_in, int ret_nodeid, + int rv) { - struct dlm_rsb *r = &ls->ls_stub_rsb; + struct dlm_rsb *r = &ls->ls_local_rsb; struct dlm_message *ms; struct dlm_mhandle *mh; - int error, nodeid = ms_in->m_header.h_nodeid; + int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid); error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh); if (error) goto out; ms->m_lkid = ms_in->m_lkid; - ms->m_result = rv; - ms->m_nodeid = ret_nodeid; + 
ms->m_result = cpu_to_le32(to_dlm_errno(rv)); + ms->m_nodeid = cpu_to_le32(ret_nodeid); - error = send_message(mh, ms); + error = send_message(mh, ms, ms_in->m_extra, receive_extralen(ms_in)); out: return error; } @@ -3859,31 +3823,32 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in, of message, unlike the send side where we can safely send everything about the lkb for any type of message */ -static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms) +static void receive_flags(struct dlm_lkb *lkb, const struct dlm_message *ms) { - lkb->lkb_exflags = ms->m_exflags; - lkb->lkb_sbflags = ms->m_sbflags; - lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | - (ms->m_flags & 0x0000FFFF); + lkb->lkb_exflags = le32_to_cpu(ms->m_exflags); + dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); + dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); } -static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms) +static void receive_flags_reply(struct dlm_lkb *lkb, + const struct dlm_message *ms, + bool local) { - if (ms->m_flags == DLM_IFL_STUB_MS) + if (local) return; - lkb->lkb_sbflags = ms->m_sbflags; - lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) | - (ms->m_flags & 0x0000FFFF); + dlm_set_sbflags_val(lkb, le32_to_cpu(ms->m_sbflags)); + dlm_set_dflags_val(lkb, le32_to_cpu(ms->m_flags)); } -static int receive_extralen(struct dlm_message *ms) +static int receive_extralen(const struct dlm_message *ms) { - return (ms->m_header.h_length - sizeof(struct dlm_message)); + return (le16_to_cpu(ms->m_header.h_length) - + sizeof(struct dlm_message)); } static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { int len; @@ -3911,16 +3876,16 @@ static void fake_astfn(void *astparam) } static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { - lkb->lkb_nodeid = ms->m_header.h_nodeid; - lkb->lkb_ownpid = ms->m_pid; - lkb->lkb_remid = ms->m_lkid; + lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); + lkb->lkb_ownpid = le32_to_cpu(ms->m_pid); + lkb->lkb_remid = le32_to_cpu(ms->m_lkid); lkb->lkb_grmode = DLM_LOCK_IV; - lkb->lkb_rqmode = ms->m_rqmode; + lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); - lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL; - lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL; + lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL; + lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? 
&fake_astfn : NULL; if (lkb->lkb_exflags & DLM_LKF_VALBLK) { /* lkb was just created so there won't be an lvb yet */ @@ -3933,7 +3898,7 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb, } static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { if (lkb->lkb_status != DLM_LKSTS_GRANTED) return -EBUSY; @@ -3941,56 +3906,65 @@ static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb, if (receive_lvb(ls, lkb, ms)) return -ENOMEM; - lkb->lkb_rqmode = ms->m_rqmode; - lkb->lkb_lvbseq = ms->m_lvbseq; + lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode); + lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq); return 0; } static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms) { if (receive_lvb(ls, lkb, ms)) return -ENOMEM; return 0; } -/* We fill in the stub-lkb fields with the info that send_xxxx_reply() +/* We fill in the local-lkb fields with the info that send_xxxx_reply() uses to send a reply and that the remote end uses to process the reply. */ -static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms) +static void setup_local_lkb(struct dlm_ls *ls, const struct dlm_message *ms) { - struct dlm_lkb *lkb = &ls->ls_stub_lkb; - lkb->lkb_nodeid = ms->m_header.h_nodeid; - lkb->lkb_remid = ms->m_lkid; + struct dlm_lkb *lkb = &ls->ls_local_lkb; + lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid); + lkb->lkb_remid = le32_to_cpu(ms->m_lkid); } /* This is called after the rsb is locked so that we can safely inspect fields in the lkb. */ -static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) +static int validate_message(struct dlm_lkb *lkb, const struct dlm_message *ms) { - int from = ms->m_header.h_nodeid; + int from = le32_to_cpu(ms->m_header.h_nodeid); int error = 0; + /* mixing user and kernel locks is currently not supported */ + if (ms->m_flags & cpu_to_le32(BIT(DLM_DFL_USER_BIT)) && + !test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { + log_error(lkb->lkb_resource->res_ls, + "got user dlm message for a kernel lock"); + error = -EINVAL; + goto out; + } + switch (ms->m_type) { - case DLM_MSG_CONVERT: - case DLM_MSG_UNLOCK: - case DLM_MSG_CANCEL: + case cpu_to_le32(DLM_MSG_CONVERT): + case cpu_to_le32(DLM_MSG_UNLOCK): + case cpu_to_le32(DLM_MSG_CANCEL): if (!is_master_copy(lkb) || lkb->lkb_nodeid != from) error = -EINVAL; break; - case DLM_MSG_CONVERT_REPLY: - case DLM_MSG_UNLOCK_REPLY: - case DLM_MSG_CANCEL_REPLY: - case DLM_MSG_GRANT: - case DLM_MSG_BAST: + case cpu_to_le32(DLM_MSG_CONVERT_REPLY): + case cpu_to_le32(DLM_MSG_UNLOCK_REPLY): + case cpu_to_le32(DLM_MSG_CANCEL_REPLY): + case cpu_to_le32(DLM_MSG_GRANT): + case cpu_to_le32(DLM_MSG_BAST): if (!is_process_copy(lkb) || lkb->lkb_nodeid != from) error = -EINVAL; break; - case DLM_MSG_REQUEST_REPLY: + case cpu_to_le32(DLM_MSG_REQUEST_REPLY): if (!is_process_copy(lkb)) error = -EINVAL; else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from) @@ -4001,87 +3975,31 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms) error = -EINVAL; } +out: if (error) log_error(lkb->lkb_resource->res_ls, "ignore invalid message %d from %d %x %x %x %d", - ms->m_type, from, lkb->lkb_id, lkb->lkb_remid, - lkb->lkb_flags, lkb->lkb_nodeid); + le32_to_cpu(ms->m_type), from, lkb->lkb_id, + lkb->lkb_remid, dlm_iflags_val(lkb), + lkb->lkb_nodeid); return error; } -static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len) -{ - char 
name[DLM_RESNAME_MAXLEN + 1]; - struct dlm_message *ms; - struct dlm_mhandle *mh; - struct dlm_rsb *r; - uint32_t hash, b; - int rv, dir_nodeid; - - memset(name, 0, sizeof(name)); - memcpy(name, ms_name, len); - - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); - - dir_nodeid = dlm_hash2nodeid(ls, hash); - - log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name); - - spin_lock(&ls->ls_rsbtbl[b].lock); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (!rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_error(ls, "repeat_remove on keep %s", name); - return; - } - - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (!rv) { - spin_unlock(&ls->ls_rsbtbl[b].lock); - log_error(ls, "repeat_remove on toss %s", name); - return; - } - - /* use ls->remove_name2 to avoid conflict with shrink? */ - - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = len; - memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); - spin_unlock(&ls->ls_rsbtbl[b].lock); - - rv = _create_message(ls, sizeof(struct dlm_message) + len, - dir_nodeid, DLM_MSG_REMOVE, &ms, &mh); - if (rv) - return; - - memcpy(ms->m_extra, name, len); - ms->m_hash = hash; - - send_message(mh, ms); - - spin_lock(&ls->ls_remove_spin); - ls->ls_remove_len = 0; - memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); - spin_unlock(&ls->ls_remove_spin); -} - -static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_request(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int from_nodeid; int error, namelen = 0; - from_nodeid = ms->m_header.h_nodeid; + from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); error = create_lkb(ls, &lkb); if (error) goto fail; receive_flags(lkb, ms); - lkb->lkb_flags |= DLM_IFL_MSTCPY; + set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); error = receive_request_args(ls, lkb, ms); if (error) { __put_lkb(ls, lkb); @@ -4136,46 +4054,34 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms) ENOTBLK request failures when the lookup reply designating us as master is delayed. */ - /* We could repeatedly return -EBADR here if our send_remove() is - delayed in being sent/arriving/being processed on the dir node. - Another node would repeatedly lookup up the master, and the dir - node would continue returning our nodeid until our send_remove - took effect. - - We send another remove message in case our previous send_remove - was lost/ignored/missed somehow. 
*/ - if (error != -ENOTBLK) { log_limit(ls, "receive_request %x from %d %d", - ms->m_lkid, from_nodeid, error); - } - - if (namelen && error == -EBADR) { - send_repeat_remove(ls, ms->m_extra, namelen); - msleep(1000); + le32_to_cpu(ms->m_lkid), from_nodeid, error); } - setup_stub_lkb(ls, ms); - send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); + setup_local_lkb(ls, ms); + send_request_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } -static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_convert(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, reply = 1; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; - if (lkb->lkb_remid != ms->m_lkid) { + if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { log_error(ls, "receive_convert %x remid %x recover_seq %llu " "remote %d %x", lkb->lkb_id, lkb->lkb_remid, (unsigned long long)lkb->lkb_recover_seq, - ms->m_header.h_nodeid, ms->m_lkid); + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid)); error = -ENOENT; + dlm_put_lkb(lkb); goto fail; } @@ -4209,26 +4115,28 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms) return 0; fail: - setup_stub_lkb(ls, ms); - send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); + setup_local_lkb(ls, ms); + send_convert_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } -static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_unlock(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; - if (lkb->lkb_remid != ms->m_lkid) { + if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) { log_error(ls, "receive_unlock %x remid %x remote %d %x", lkb->lkb_id, lkb->lkb_remid, - ms->m_header.h_nodeid, ms->m_lkid); + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid)); error = -ENOENT; + dlm_put_lkb(lkb); goto fail; } @@ -4259,18 +4167,18 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms) return 0; fail: - setup_stub_lkb(ls, ms); - send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); + setup_local_lkb(ls, ms); + send_unlock_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } -static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_cancel(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) goto fail; @@ -4295,18 +4203,18 @@ static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms) return 0; fail: - setup_stub_lkb(ls, ms); - send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error); + setup_local_lkb(ls, ms); + send_cancel_reply(&ls->ls_local_rsb, &ls->ls_local_lkb, error); return error; } -static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_grant(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; @@ -4319,7 +4227,7 @@ static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) if (error) goto out; - receive_flags_reply(lkb, ms); 
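/*
 * A note on the new "local" argument used from here on (a sketch of the
 * convention, based on the receive_flags_reply() hunk above): false means
 * the reply really arrived from the network, so sbflags/dflags are copied
 * out of the message; true marks replies synthesized locally during
 * recovery, whose flag fields are meaningless and are skipped.
 */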
+ receive_flags_reply(lkb, ms, false); if (is_altmode(lkb)) munge_altmode(lkb, ms); grant_lock_pc(r, lkb, ms); @@ -4331,13 +4239,13 @@ static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms) return 0; } -static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_bast(struct dlm_ls *ls, const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; @@ -4350,8 +4258,8 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) if (error) goto out; - queue_bast(r, lkb, ms->m_bastmode); - lkb->lkb_highbast = ms->m_bastmode; + queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode)); + lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode); out: unlock_rsb(r); put_rsb(r); @@ -4359,11 +4267,11 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms) return 0; } -static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) +static void receive_lookup(struct dlm_ls *ls, const struct dlm_message *ms) { int len, error, ret_nodeid, from_nodeid, our_nodeid; - from_nodeid = ms->m_header.h_nodeid; + from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); our_nodeid = dlm_our_nodeid(); len = receive_extralen(ms); @@ -4379,14 +4287,13 @@ static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms) send_lookup_reply(ls, ms, ret_nodeid, error); } -static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms) +static void receive_remove(struct dlm_ls *ls, const struct dlm_message *ms) { char name[DLM_RESNAME_MAXLEN+1]; struct dlm_rsb *r; - uint32_t hash, b; int rv, len, dir_nodeid, from_nodeid; - from_nodeid = ms->m_header.h_nodeid; + from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); len = receive_extralen(ms); @@ -4396,90 +4303,99 @@ static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms) return; } - dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash); + dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash)); if (dir_nodeid != dlm_our_nodeid()) { log_error(ls, "receive_remove from %d bad nodeid %d", from_nodeid, dir_nodeid); return; } - /* Look for name on rsbtbl.toss, if it's there, kill it. - If it's on rsbtbl.keep, it's being used, and we should ignore this - message. This is an expected race between the dir node sending a - request to the master node at the same time as the master node sends - a remove to the dir node. The resolution to that race is for the - dir node to ignore the remove message, and the master node to - recreate the master rsb when it gets a request from the dir node for - an rsb it doesn't have. */ + /* + * Look for inactive rsb, if it's there, free it. + * If the rsb is active, it's being used, and we should ignore this + * message. This is an expected race between the dir node sending a + * request to the master node at the same time as the master node sends + * a remove to the dir node. The resolution to that race is for the + * dir node to ignore the remove message, and the master node to + * recreate the master rsb when it gets a request from the dir node for + * an rsb it doesn't have. 
+ */ memset(name, 0, sizeof(name)); memcpy(name, ms->m_extra, len); - hash = jhash(name, len, 0); - b = hash & (ls->ls_rsbtbl_size - 1); + rcu_read_lock(); + rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r); + if (rv) { + rcu_read_unlock(); + /* should not happen */ + log_error(ls, "%s from %d not found %s", __func__, + from_nodeid, name); + return; + } - spin_lock(&ls->ls_rsbtbl[b].lock); + write_lock_bh(&ls->ls_rsbtbl_lock); + if (!rsb_flag(r, RSB_HASHED)) { + rcu_read_unlock(); + write_unlock_bh(&ls->ls_rsbtbl_lock); + /* should not happen */ + log_error(ls, "%s from %d got removed during removal %s", + __func__, from_nodeid, name); + return; + } + /* at this stage the rsb can only be freed here */ + rcu_read_unlock(); - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r); - if (rv) { - /* verify the rsb is on keep list per comment above */ - rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r); - if (rv) { - /* should not happen */ - log_error(ls, "receive_remove from %d not found %s", - from_nodeid, name); - spin_unlock(&ls->ls_rsbtbl[b].lock); - return; - } + if (!rsb_flag(r, RSB_INACTIVE)) { if (r->res_master_nodeid != from_nodeid) { /* should not happen */ - log_error(ls, "receive_remove keep from %d master %d", + log_error(ls, "receive_remove on active rsb from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } + /* Ignore the remove message, see race comment above. */ + log_debug(ls, "receive_remove from %d master %d first %x %s", from_nodeid, r->res_master_nodeid, r->res_first_lkid, name); - spin_unlock(&ls->ls_rsbtbl[b].lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } if (r->res_master_nodeid != from_nodeid) { - log_error(ls, "receive_remove toss from %d master %d", + log_error(ls, "receive_remove inactive from %d master %d", from_nodeid, r->res_master_nodeid); dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); + write_unlock_bh(&ls->ls_rsbtbl_lock); return; } - if (kref_put(&r->res_ref, kill_rsb)) { - rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss); - spin_unlock(&ls->ls_rsbtbl[b].lock); - dlm_free_rsb(r); - } else { - log_error(ls, "receive_remove from %d rsb ref error", - from_nodeid); - dlm_print_rsb(r); - spin_unlock(&ls->ls_rsbtbl[b].lock); - } + list_del(&r->res_slow_list); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); + rsb_clear_flag(r, RSB_HASHED); + write_unlock_bh(&ls->ls_rsbtbl_lock); + + free_inactive_rsb(r); } -static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms) +static void receive_purge(struct dlm_ls *ls, const struct dlm_message *ms) { - do_purge(ls, ms->m_nodeid, ms->m_pid); + do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid)); } -static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_request_reply(struct dlm_ls *ls, + const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, mstype, result; - int from_nodeid = ms->m_header.h_nodeid; + int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid); - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; @@ -4495,7 +4411,8 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY); if (error) { log_error(ls, "receive_request_reply %x remote %d %x result %d", - lkb->lkb_id, from_nodeid, ms->m_lkid, 
ms->m_result); + lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid), + from_dlm_errno(le32_to_cpu(ms->m_result))); dlm_dump_rsb(r); goto out; } @@ -4509,7 +4426,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) } /* this is the value returned from do_request() on the master */ - result = ms->m_result; + result = from_dlm_errno(le32_to_cpu(ms->m_result)); switch (result) { case -EAGAIN: @@ -4522,13 +4439,12 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) case -EINPROGRESS: case 0: /* request was queued or granted on remote master */ - receive_flags_reply(lkb, ms); - lkb->lkb_remid = ms->m_lkid; + receive_flags_reply(lkb, ms, false); + lkb->lkb_remid = le32_to_cpu(ms->m_lkid); if (is_altmode(lkb)) munge_altmode(lkb, ms); if (result) { add_lkb(r, lkb, DLM_LKSTS_WAITING); - add_timeout(lkb); } else { grant_lock_pc(r, lkb, ms); queue_cast(r, lkb, 0); @@ -4570,20 +4486,21 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) lkb->lkb_id, result); } - if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) { + if ((result == 0 || result == -EINPROGRESS) && + test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags)) { log_debug(ls, "receive_request_reply %x result %d unlock", lkb->lkb_id, result); - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; + clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); send_unlock(r, lkb); - } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) { + } else if ((result == -EINPROGRESS) && + test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, + &lkb->lkb_iflags)) { log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id); - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; + clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); send_cancel(r, lkb); } else { - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; + clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, &lkb->lkb_iflags); + clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, &lkb->lkb_iflags); } out: unlock_rsb(r); @@ -4593,34 +4510,33 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms) } static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, - struct dlm_message *ms) + const struct dlm_message *ms, bool local) { /* this is the value returned from do_convert() on the master */ - switch (ms->m_result) { + switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -EAGAIN: /* convert would block (be queued) on remote master */ queue_cast(r, lkb, -EAGAIN); break; case -EDEADLK: - receive_flags_reply(lkb, ms); + receive_flags_reply(lkb, ms, local); revert_lock_pc(r, lkb); queue_cast(r, lkb, -EDEADLK); break; case -EINPROGRESS: /* convert was queued on remote master */ - receive_flags_reply(lkb, ms); + receive_flags_reply(lkb, ms, local); if (is_demoted(lkb)) munge_demoted(lkb); del_lkb(r, lkb); add_lkb(r, lkb, DLM_LKSTS_CONVERT); - add_timeout(lkb); break; case 0: /* convert was granted on remote master */ - receive_flags_reply(lkb, ms); + receive_flags_reply(lkb, ms, local); if (is_demoted(lkb)) munge_demoted(lkb); grant_lock_pc(r, lkb, ms); @@ -4629,14 +4545,16 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, default: log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d", - lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid, - ms->m_result); + lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), + 
from_dlm_errno(le32_to_cpu(ms->m_result))); dlm_print_rsb(r); dlm_print_lkb(lkb); } } -static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms) +static void _receive_convert_reply(struct dlm_lkb *lkb, + const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; @@ -4648,32 +4566,33 @@ static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms) if (error) goto out; - /* stub reply can happen with waiters_mutex held */ - error = remove_from_waiters_ms(lkb, ms); + error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; - __receive_convert_reply(r, lkb, ms); + __receive_convert_reply(r, lkb, ms, local); out: unlock_rsb(r); put_rsb(r); } -static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_convert_reply(struct dlm_ls *ls, + const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; - _receive_convert_reply(lkb, ms); + _receive_convert_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } -static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) +static void _receive_unlock_reply(struct dlm_lkb *lkb, + const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; @@ -4685,16 +4604,15 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) if (error) goto out; - /* stub reply can happen with waiters_mutex held */ - error = remove_from_waiters_ms(lkb, ms); + error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; /* this is the value returned from do_unlock() on the master */ - switch (ms->m_result) { + switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -DLM_EUNLOCK: - receive_flags_reply(lkb, ms); + receive_flags_reply(lkb, ms, local); remove_lock_pc(r, lkb); queue_cast(r, lkb, -DLM_EUNLOCK); break; @@ -4702,28 +4620,30 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms) break; default: log_error(r->res_ls, "receive_unlock_reply %x error %d", - lkb->lkb_id, ms->m_result); + lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result))); } out: unlock_rsb(r); put_rsb(r); } -static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_unlock_reply(struct dlm_ls *ls, + const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; - _receive_unlock_reply(lkb, ms); + _receive_unlock_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } -static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) +static void _receive_cancel_reply(struct dlm_lkb *lkb, + const struct dlm_message *ms, bool local) { struct dlm_rsb *r = lkb->lkb_resource; int error; @@ -4735,16 +4655,15 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) if (error) goto out; - /* stub reply can happen with waiters_mutex held */ - error = remove_from_waiters_ms(lkb, ms); + error = remove_from_waiters_ms(lkb, ms, local); if (error) goto out; /* this is the value returned from do_cancel() on the master */ - switch (ms->m_result) { + switch (from_dlm_errno(le32_to_cpu(ms->m_result))) { case -DLM_ECANCEL: - receive_flags_reply(lkb, ms); + receive_flags_reply(lkb, ms, local); revert_lock_pc(r, lkb); queue_cast(r, lkb, -DLM_ECANCEL); break; @@ -4752,37 +4671,41 @@ 
static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms) break; default: log_error(r->res_ls, "receive_cancel_reply %x error %d", - lkb->lkb_id, ms->m_result); + lkb->lkb_id, + from_dlm_errno(le32_to_cpu(ms->m_result))); } out: unlock_rsb(r); put_rsb(r); } -static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms) +static int receive_cancel_reply(struct dlm_ls *ls, + const struct dlm_message *ms) { struct dlm_lkb *lkb; int error; - error = find_lkb(ls, ms->m_remid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb); if (error) return error; - _receive_cancel_reply(lkb, ms); + _receive_cancel_reply(lkb, ms, false); dlm_put_lkb(lkb); return 0; } -static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) +static void receive_lookup_reply(struct dlm_ls *ls, + const struct dlm_message *ms) { struct dlm_lkb *lkb; struct dlm_rsb *r; int error, ret_nodeid; int do_lookup_list = 0; - error = find_lkb(ls, ms->m_lkid, &lkb); + error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb); if (error) { - log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid); + log_error(ls, "%s no lkid %x", __func__, + le32_to_cpu(ms->m_lkid)); return; } @@ -4797,7 +4720,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) if (error) goto out; - ret_nodeid = ms->m_nodeid; + ret_nodeid = le32_to_cpu(ms->m_nodeid); /* We sometimes receive a request from the dir node for this rsb before we've received the dir node's loookup_reply for it. @@ -4809,8 +4732,8 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) /* This should never happen */ log_error(ls, "receive_lookup_reply %x from %d ret %d " "master %d dir %d our %d first %x %s", - lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid, - r->res_master_nodeid, r->res_dir_nodeid, + lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid), + ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid, dlm_our_nodeid(), r->res_first_lkid, r->res_name); } @@ -4822,7 +4745,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) } else if (ret_nodeid == -1) { /* the remote node doesn't believe it's the dir node */ log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid", - lkb->lkb_id, ms->m_header.h_nodeid); + lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid)); r->res_master_nodeid = 0; r->res_nodeid = -1; lkb->lkb_nodeid = -1; @@ -4834,7 +4757,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) if (is_overlap(lkb)) { log_debug(ls, "receive_lookup_reply %x unlock %x", - lkb->lkb_id, lkb->lkb_flags); + lkb->lkb_id, dlm_iflags_val(lkb)); queue_cast_overlap(r, lkb); unhold_lkb(lkb); /* undoes create_lkb() */ goto out_list; @@ -4851,15 +4774,17 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms) dlm_put_lkb(lkb); } -static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, +static void _receive_message(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq) { int error = 0, noent = 0; - if (!dlm_is_member(ls, ms->m_header.h_nodeid)) { + if (WARN_ON_ONCE(!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid)))) { log_limit(ls, "receive %d from non-member %d %x %x %d", - ms->m_type, ms->m_header.h_nodeid, ms->m_lkid, - ms->m_remid, ms->m_result); + le32_to_cpu(ms->m_type), + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), + from_dlm_errno(le32_to_cpu(ms->m_result))); return; } @@ -4867,77 +4792,78 @@ static void _receive_message(struct 
dlm_ls *ls, struct dlm_message *ms, /* messages sent to a master node */ - case DLM_MSG_REQUEST: + case cpu_to_le32(DLM_MSG_REQUEST): error = receive_request(ls, ms); break; - case DLM_MSG_CONVERT: + case cpu_to_le32(DLM_MSG_CONVERT): error = receive_convert(ls, ms); break; - case DLM_MSG_UNLOCK: + case cpu_to_le32(DLM_MSG_UNLOCK): error = receive_unlock(ls, ms); break; - case DLM_MSG_CANCEL: + case cpu_to_le32(DLM_MSG_CANCEL): noent = 1; error = receive_cancel(ls, ms); break; /* messages sent from a master node (replies to above) */ - case DLM_MSG_REQUEST_REPLY: + case cpu_to_le32(DLM_MSG_REQUEST_REPLY): error = receive_request_reply(ls, ms); break; - case DLM_MSG_CONVERT_REPLY: + case cpu_to_le32(DLM_MSG_CONVERT_REPLY): error = receive_convert_reply(ls, ms); break; - case DLM_MSG_UNLOCK_REPLY: + case cpu_to_le32(DLM_MSG_UNLOCK_REPLY): error = receive_unlock_reply(ls, ms); break; - case DLM_MSG_CANCEL_REPLY: + case cpu_to_le32(DLM_MSG_CANCEL_REPLY): error = receive_cancel_reply(ls, ms); break; /* messages sent from a master node (only two types of async msg) */ - case DLM_MSG_GRANT: + case cpu_to_le32(DLM_MSG_GRANT): noent = 1; error = receive_grant(ls, ms); break; - case DLM_MSG_BAST: + case cpu_to_le32(DLM_MSG_BAST): noent = 1; error = receive_bast(ls, ms); break; /* messages sent to a dir node */ - case DLM_MSG_LOOKUP: + case cpu_to_le32(DLM_MSG_LOOKUP): receive_lookup(ls, ms); break; - case DLM_MSG_REMOVE: + case cpu_to_le32(DLM_MSG_REMOVE): receive_remove(ls, ms); break; /* messages sent from a dir node (remove has no reply) */ - case DLM_MSG_LOOKUP_REPLY: + case cpu_to_le32(DLM_MSG_LOOKUP_REPLY): receive_lookup_reply(ls, ms); break; /* other messages */ - case DLM_MSG_PURGE: + case cpu_to_le32(DLM_MSG_PURGE): receive_purge(ls, ms); break; default: - log_error(ls, "unknown message type %d", ms->m_type); + log_error(ls, "unknown message type %d", + le32_to_cpu(ms->m_type)); } /* @@ -4953,22 +4879,26 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, if (error == -ENOENT && noent) { log_debug(ls, "receive %d no %x remote %d %x saved_seq %u", - ms->m_type, ms->m_remid, ms->m_header.h_nodeid, - ms->m_lkid, saved_seq); + le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), saved_seq); } else if (error == -ENOENT) { log_error(ls, "receive %d no %x remote %d %x saved_seq %u", - ms->m_type, ms->m_remid, ms->m_header.h_nodeid, - ms->m_lkid, saved_seq); + le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid), + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), saved_seq); - if (ms->m_type == DLM_MSG_CONVERT) - dlm_dump_rsb_hash(ls, ms->m_hash); + if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT)) + dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash)); } if (error == -EINVAL) { log_error(ls, "receive %d inval from %d lkid %x remid %x " "saved_seq %u", - ms->m_type, ms->m_header.h_nodeid, - ms->m_lkid, ms->m_remid, saved_seq); + le32_to_cpu(ms->m_type), + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), + saved_seq); } } @@ -4980,30 +4910,42 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms, requestqueue, to processing all the saved messages, to processing new messages as they arrive. 
*/ -static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms, +static void dlm_receive_message(struct dlm_ls *ls, const struct dlm_message *ms, int nodeid) { - if (dlm_locking_stopped(ls)) { +try_again: + read_lock_bh(&ls->ls_requestqueue_lock); + if (test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { /* If we were a member of this lockspace, left, and rejoined, other nodes may still be sending us messages from the lockspace generation before we left. */ - if (!ls->ls_generation) { + if (WARN_ON_ONCE(!ls->ls_generation)) { + read_unlock_bh(&ls->ls_requestqueue_lock); log_limit(ls, "receive %d from %d ignore old gen", - ms->m_type, nodeid); + le32_to_cpu(ms->m_type), nodeid); return; } + read_unlock_bh(&ls->ls_requestqueue_lock); + write_lock_bh(&ls->ls_requestqueue_lock); + /* recheck because we hold writelock now */ + if (!test_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags)) { + write_unlock_bh(&ls->ls_requestqueue_lock); + goto try_again; + } + dlm_add_requestqueue(ls, nodeid, ms); + write_unlock_bh(&ls->ls_requestqueue_lock); } else { - dlm_wait_requestqueue(ls); _receive_message(ls, ms, 0); + read_unlock_bh(&ls->ls_requestqueue_lock); } } /* This is called by dlm_recoverd to process messages that were saved on the requestqueue. */ -void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, +void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq) { _receive_message(ls, ms, saved_seq); @@ -5014,38 +4956,38 @@ void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, standard locking activity) or an RCOM (recovery message sent as part of lockspace recovery). */ -void dlm_receive_buffer(union dlm_packet *p, int nodeid) +void dlm_receive_buffer(const union dlm_packet *p, int nodeid) { - struct dlm_header *hd = &p->header; + const struct dlm_header *hd = &p->header; struct dlm_ls *ls; int type = 0; switch (hd->h_cmd) { case DLM_MSG: - dlm_message_in(&p->message); - type = p->message.m_type; + type = le32_to_cpu(p->message.m_type); break; case DLM_RCOM: - dlm_rcom_in(&p->rcom); - type = p->rcom.rc_type; + type = le32_to_cpu(p->rcom.rc_type); break; default: log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); return; } - if (hd->h_nodeid != nodeid) { + if (le32_to_cpu(hd->h_nodeid) != nodeid) { log_print("invalid h_nodeid %d from %d lockspace %x", - hd->h_nodeid, nodeid, hd->h_lockspace); + le32_to_cpu(hd->h_nodeid), nodeid, + le32_to_cpu(hd->u.h_lockspace)); return; } - ls = dlm_find_lockspace_global(hd->h_lockspace); + ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace)); if (!ls) { if (dlm_config.ci_log_debug) { printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace " "%u from %d cmd %d type %d\n", - hd->h_lockspace, nodeid, hd->h_cmd, type); + le32_to_cpu(hd->u.h_lockspace), nodeid, + hd->h_cmd, type); } if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) @@ -5056,35 +4998,40 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid) /* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to be inactive (in this ls) before transitioning to recovery mode */ - down_read(&ls->ls_recv_active); + read_lock_bh(&ls->ls_recv_active); if (hd->h_cmd == DLM_MSG) dlm_receive_message(ls, &p->message, nodeid); - else + else if (hd->h_cmd == DLM_RCOM) dlm_receive_rcom(ls, &p->rcom, nodeid); - up_read(&ls->ls_recv_active); + else + log_error(ls, "invalid h_cmd %d from %d lockspace %x", + hd->h_cmd, nodeid, le32_to_cpu(hd->u.h_lockspace)); + read_unlock_bh(&ls->ls_recv_active); 
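/*
 * Stop-side counterpart implied by the comment above (an assumed shape,
 * given that ls_recv_active is now a rwlock_t and dlm_receive_message()
 * tests LSFL_RECV_MSG_BLOCKED): taking the lock for write drains every
 * receive path in this lockspace, after which new messages are diverted
 * to the requestqueue until recovery completes.
 *
 *	write_lock_bh(&ls->ls_recv_active);
 *	set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
 *	write_unlock_bh(&ls->ls_recv_active);
 */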
dlm_put_lockspace(ls); } static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_message *ms_stub) + struct dlm_message *ms_local) { if (middle_conversion(lkb)) { + log_rinfo(ls, "%s %x middle convert in progress", __func__, + lkb->lkb_id); + + /* We sent this lock to the new master. The new master will + * tell us when it's granted. We no longer need a reply, so + * use a fake reply to put the lkb into the right state. + */ hold_lkb(lkb); - memset(ms_stub, 0, sizeof(struct dlm_message)); - ms_stub->m_flags = DLM_IFL_STUB_MS; - ms_stub->m_type = DLM_MSG_CONVERT_REPLY; - ms_stub->m_result = -EINPROGRESS; - ms_stub->m_header.h_nodeid = lkb->lkb_nodeid; - _receive_convert_reply(lkb, ms_stub); - - /* Same special case as in receive_rcom_lock_args() */ - lkb->lkb_grmode = DLM_LOCK_IV; - rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT); + memset(ms_local, 0, sizeof(struct dlm_message)); + ms_local->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY); + ms_local->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS)); + ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); + _receive_convert_reply(lkb, ms_local, true); unhold_lkb(lkb); } else if (lkb->lkb_rqmode >= lkb->lkb_grmode) { - lkb->lkb_flags |= DLM_IFL_RESEND; + set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); } /* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down @@ -5115,17 +5062,13 @@ static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb, void dlm_recover_waiters_pre(struct dlm_ls *ls) { struct dlm_lkb *lkb, *safe; - struct dlm_message *ms_stub; - int wait_type, stub_unlock_result, stub_cancel_result; + struct dlm_message *ms_local; + int wait_type, local_unlock_result, local_cancel_result; int dir_nodeid; - ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL); - if (!ms_stub) { - log_error(ls, "dlm_recover_waiters_pre no mem"); + ms_local = kmalloc(sizeof(*ms_local), GFP_KERNEL); + if (!ms_local) return; - } - - mutex_lock(&ls->ls_waiters_mutex); list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) { @@ -5150,7 +5093,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) resent after recovery is done */ if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) { - lkb->lkb_flags |= DLM_IFL_RESEND; + set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); continue; } @@ -5158,8 +5101,8 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) continue; wait_type = lkb->lkb_wait_type; - stub_unlock_result = -DLM_EUNLOCK; - stub_cancel_result = -DLM_ECANCEL; + local_unlock_result = -DLM_EUNLOCK; + local_cancel_result = -DLM_ECANCEL; /* Main reply may have been received leaving a zero wait_type, but a reply for the overlapping op may not have been @@ -5170,48 +5113,46 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) if (is_overlap_cancel(lkb)) { wait_type = DLM_MSG_CANCEL; if (lkb->lkb_grmode == DLM_LOCK_IV) - stub_cancel_result = 0; + local_cancel_result = 0; } if (is_overlap_unlock(lkb)) { wait_type = DLM_MSG_UNLOCK; if (lkb->lkb_grmode == DLM_LOCK_IV) - stub_unlock_result = -ENOENT; + local_unlock_result = -ENOENT; } log_debug(ls, "rwpre overlap %x %x %d %d %d", - lkb->lkb_id, lkb->lkb_flags, wait_type, - stub_cancel_result, stub_unlock_result); + lkb->lkb_id, dlm_iflags_val(lkb), wait_type, + local_cancel_result, local_unlock_result); } switch (wait_type) { case DLM_MSG_REQUEST: - lkb->lkb_flags |= DLM_IFL_RESEND; + set_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); break; case DLM_MSG_CONVERT: - recover_convert_waiter(ls, lkb, ms_stub); + recover_convert_waiter(ls, lkb, ms_local); break; 
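/*
 * The DLM_MSG_UNLOCK and DLM_MSG_CANCEL cases below resolve a voided
 * operation with the same local-reply technique recover_convert_waiter()
 * uses above: a minimal reply is synthesized in ms_local and fed through
 * the normal receive path with local=true, so receive_flags_reply()
 * leaves the lkb's flag words alone and only the chosen m_result takes
 * effect.
 */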
case DLM_MSG_UNLOCK: hold_lkb(lkb); - memset(ms_stub, 0, sizeof(struct dlm_message)); - ms_stub->m_flags = DLM_IFL_STUB_MS; - ms_stub->m_type = DLM_MSG_UNLOCK_REPLY; - ms_stub->m_result = stub_unlock_result; - ms_stub->m_header.h_nodeid = lkb->lkb_nodeid; - _receive_unlock_reply(lkb, ms_stub); + memset(ms_local, 0, sizeof(struct dlm_message)); + ms_local->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY); + ms_local->m_result = cpu_to_le32(to_dlm_errno(local_unlock_result)); + ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); + _receive_unlock_reply(lkb, ms_local, true); dlm_put_lkb(lkb); break; case DLM_MSG_CANCEL: hold_lkb(lkb); - memset(ms_stub, 0, sizeof(struct dlm_message)); - ms_stub->m_flags = DLM_IFL_STUB_MS; - ms_stub->m_type = DLM_MSG_CANCEL_REPLY; - ms_stub->m_result = stub_cancel_result; - ms_stub->m_header.h_nodeid = lkb->lkb_nodeid; - _receive_cancel_reply(lkb, ms_stub); + memset(ms_local, 0, sizeof(struct dlm_message)); + ms_local->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY); + ms_local->m_result = cpu_to_le32(to_dlm_errno(local_cancel_result)); + ms_local->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid); + _receive_cancel_reply(lkb, ms_local, true); dlm_put_lkb(lkb); break; @@ -5221,45 +5162,52 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls) } schedule(); } - mutex_unlock(&ls->ls_waiters_mutex); - kfree(ms_stub); + kfree(ms_local); } static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls) { - struct dlm_lkb *lkb; - int found = 0; + struct dlm_lkb *lkb = NULL, *iter; - mutex_lock(&ls->ls_waiters_mutex); - list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) { - if (lkb->lkb_flags & DLM_IFL_RESEND) { - hold_lkb(lkb); - found = 1; + spin_lock_bh(&ls->ls_waiters_lock); + list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) { + if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) { + hold_lkb(iter); + lkb = iter; break; } } - mutex_unlock(&ls->ls_waiters_mutex); + spin_unlock_bh(&ls->ls_waiters_lock); - if (!found) - lkb = NULL; return lkb; } -/* Deal with lookups and lkb's marked RESEND from _pre. We may now be the - master or dir-node for r. Processing the lkb may result in it being placed - back on waiters. */ - -/* We do this after normal locking has been enabled and any saved messages - (in requestqueue) have been processed. We should be confident that at - this point we won't get or process a reply to any of these waiting - operations. But, new ops may be coming in on the rsbs/locks here from - userspace or remotely. */ - -/* there may have been an overlap unlock/cancel prior to recovery or after - recovery. if before, the lkb may still have a pos wait_count; if after, the - overlap flag would just have been set and nothing new sent. we can be - confident here than any replies to either the initial op or overlap ops - prior to recovery have been received. */ +/* + * Forced state reset for locks that were in the middle of remote operations + * when recovery happened (i.e. lkbs that were on the waiters list, waiting + * for a reply from a remote operation.) The lkbs remaining on the waiters + * list need to be reevaluated; some may need resending to a different node + * than previously, and some may now need local handling rather than remote. + * + * First, the lkb state for the voided remote operation is forcibly reset, + * equivalent to what remove_from_waiters() would normally do: + * . lkb removed from ls_waiters list + * . lkb wait_type cleared + * . lkb waiters_count cleared + * . 
lkb ref count decremented for each waiters_count (almost always 1, + * but possibly 2 in case of cancel/unlock overlapping, which means + * two remote replies were being expected for the lkb.) + * + * Second, the lkb is reprocessed like an original operation would be, + * by passing it to _request_lock or _convert_lock, which will either + * process the lkb operation locally, or send it to a remote node again + * and put the lkb back onto the waiters list. + * + * When reprocessing the lkb, we may find that it's flagged for an overlapping + * force-unlock or cancel, either from before recovery began, or after recovery + * finished. If this is the case, the unlock/cancel is done directly, and the + * original operation is not initiated again (no _request_lock/_convert_lock.) + */ int dlm_recover_waiters_post(struct dlm_ls *ls) { @@ -5274,6 +5222,11 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) break; } + /* + * Find an lkb from the waiters list that's been affected by + * recovery node changes, and needs to be reprocessed. Does + * hold_lkb(), adding a refcount. + */ lkb = find_resend_waiter(ls); if (!lkb) break; @@ -5282,9 +5235,16 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) hold_rsb(r); lock_rsb(r); + /* + * If the lkb has been flagged for a force unlock or cancel, + * then the reprocessing below will be replaced by just doing + * the unlock/cancel directly. + */ mstype = lkb->lkb_wait_type; - oc = is_overlap_cancel(lkb); - ou = is_overlap_unlock(lkb); + oc = test_and_clear_bit(DLM_IFL_OVERLAP_CANCEL_BIT, + &lkb->lkb_iflags); + ou = test_and_clear_bit(DLM_IFL_OVERLAP_UNLOCK_BIT, + &lkb->lkb_iflags); err = 0; log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d " @@ -5293,19 +5253,39 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid, dlm_dir_nodeid(r), oc, ou); - /* At this point we assume that we won't get a reply to any - previous op or overlap op on this lock. First, do a big - remove_from_waiters() for all previous ops. */ + /* + * No reply to the pre-recovery operation will now be received, + * so a forced equivalent of remove_from_waiters() is needed to + * reset the waiters state that was in place before recovery. + */ + + clear_bit(DLM_IFL_RESEND_BIT, &lkb->lkb_iflags); - lkb->lkb_flags &= ~DLM_IFL_RESEND; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK; - lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL; + /* Forcibly clear wait_type */ lkb->lkb_wait_type = 0; - lkb->lkb_wait_count = 0; - mutex_lock(&ls->ls_waiters_mutex); + + /* + * Forcibly reset wait_count and associated refcount. The + * wait_count will almost always be 1, but in case of an + * overlapping unlock/cancel it could be 2: see where + * add_to_waiters() finds the lkb is already on the waiters + * list and does lkb_wait_count++; hold_lkb(). + */ + while (lkb->lkb_wait_count) { + lkb->lkb_wait_count--; + unhold_lkb(lkb); + } + + /* Forcibly remove from waiters list */ + spin_lock_bh(&ls->ls_waiters_lock); list_del_init(&lkb->lkb_wait_reply); - mutex_unlock(&ls->ls_waiters_mutex); - unhold_lkb(lkb); /* for waiters list */ + spin_unlock_bh(&ls->ls_waiters_lock); + + /* + * The lkb is now clear of all prior waiters state and can be + * processed locally, or sent to remote node again, or directly + * cancelled/unlocked. 
+ */ if (oc || ou) { /* do an unlock or cancel instead of resending */ @@ -5332,7 +5312,7 @@ int dlm_recover_waiters_post(struct dlm_ls *ls) case DLM_MSG_LOOKUP: case DLM_MSG_REQUEST: _request_lock(r, lkb); - if (is_master(r)) + if (r->res_nodeid != -1 && is_master(r)) confirm_master(r, 0); break; case DLM_MSG_CONVERT: @@ -5424,7 +5404,7 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r, /* Get rid of locks held by nodes that are gone. */ -void dlm_recover_purge(struct dlm_ls *ls) +void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; struct dlm_member *memb; @@ -5443,11 +5423,9 @@ void dlm_recover_purge(struct dlm_ls *ls) if (!nodes_count) return; - down_write(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { - hold_rsb(r); + list_for_each_entry(r, root_list, res_root_list) { lock_rsb(r); - if (is_master(r)) { + if (r->res_nodeid != -1 && is_master(r)) { purge_dead_list(ls, r, &r->res_grantqueue, nodeid_gone, &lkb_count); purge_dead_list(ls, r, &r->res_convertqueue, @@ -5456,25 +5434,21 @@ void dlm_recover_purge(struct dlm_ls *ls) nodeid_gone, &lkb_count); } unlock_rsb(r); - unhold_rsb(r); + cond_resched(); } - up_write(&ls->ls_root_sem); if (lkb_count) - log_debug(ls, "dlm_recover_purge %u locks for %u nodes", + log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes", lkb_count, nodes_count); } -static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) +static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls) { - struct rb_node *n; struct dlm_rsb *r; - spin_lock(&ls->ls_rsbtbl[bucket].lock); - for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - + read_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { if (!rsb_flag(r, RSB_RECOVER_GRANT)) continue; if (!is_master(r)) { @@ -5482,10 +5456,10 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) continue; } hold_rsb(r); - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); return r; } - spin_unlock(&ls->ls_rsbtbl[bucket].lock); + read_unlock_bh(&ls->ls_rsbtbl_lock); return NULL; } @@ -5509,19 +5483,15 @@ static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket) void dlm_recover_grant(struct dlm_ls *ls) { struct dlm_rsb *r; - int bucket = 0; unsigned int count = 0; unsigned int rsb_count = 0; unsigned int lkb_count = 0; while (1) { - r = find_grant_rsb(ls, bucket); - if (!r) { - if (bucket == ls->ls_rsbtbl_size - 1) - break; - bucket++; - continue; - } + r = find_grant_rsb(ls); + if (!r) + break; + rsb_count++; count = 0; lock_rsb(r); @@ -5536,7 +5506,7 @@ void dlm_recover_grant(struct dlm_ls *ls) } if (lkb_count) - log_debug(ls, "dlm_recover_grant %u locks on %u resources", + log_rinfo(ls, "dlm_recover_grant %u locks on %u resources", lkb_count, rsb_count); } @@ -5571,16 +5541,16 @@ static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid, /* needs at least dlm_rcom + rcom_lock */ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, - struct dlm_rsb *r, struct dlm_rcom *rc) + struct dlm_rsb *r, const struct dlm_rcom *rc) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; - lkb->lkb_nodeid = rc->rc_header.h_nodeid; + lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid); lkb->lkb_remid = le32_to_cpu(rl->rl_lkid); lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags); - lkb->lkb_flags = 
le32_to_cpu(rl->rl_flags) & 0x0000FFFF; - lkb->lkb_flags |= DLM_IFL_MSTCPY; + dlm_set_dflags_val(lkb, le32_to_cpu(rl->rl_flags)); + set_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags); lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq); lkb->lkb_rqmode = rl->rl_rqmode; lkb->lkb_grmode = rl->rl_grmode; @@ -5590,8 +5560,8 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL; if (lkb->lkb_exflags & DLM_LKF_VALBLK) { - int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) - - sizeof(struct rcom_lock); + int lvblen = le16_to_cpu(rc->rc_header.h_length) - + sizeof(struct dlm_rcom) - sizeof(struct rcom_lock); if (lvblen > ls->ls_lvblen) return -EINVAL; lkb->lkb_lvbptr = dlm_allocate_lvb(ls); @@ -5604,10 +5574,11 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, The real granted mode of these converting locks cannot be determined until all locks have been rebuilt on the rsb (recover_conversion) */ - if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) && - middle_conversion(lkb)) { - rl->rl_status = DLM_LKSTS_CONVERT; - lkb->lkb_grmode = DLM_LOCK_IV; + if (rl->rl_status == DLM_LKSTS_CONVERT && middle_conversion(lkb)) { + /* We may need to adjust grmode depending on other granted locks. */ + log_rinfo(ls, "%s %x middle convert gr %d rq %d remote %d %x", + __func__, lkb->lkb_id, lkb->lkb_grmode, + lkb->lkb_rqmode, lkb->lkb_nodeid, lkb->lkb_remid); rsb_set_flag(r, RSB_RECOVER_CONVERT); } @@ -5621,15 +5592,19 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb, back the rcom_lock struct we got but with the remid field filled in. */ /* needs at least dlm_rcom + rcom_lock */ -int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) +int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, + __le32 *rl_remid, __le32 *rl_result) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; struct dlm_rsb *r; struct dlm_lkb *lkb; uint32_t remid = 0; - int from_nodeid = rc->rc_header.h_nodeid; + int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid); int error; + /* init rl_remid with rcom lock rl_remid */ + *rl_remid = rl->rl_remid; + if (rl->rl_parent_lkid) { error = -EOPNOTSUPP; goto out; @@ -5677,7 +5652,6 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) attach_lkb(r, lkb); add_lkb(r, lkb, rl->rl_status); - error = 0; ls->ls_recover_locks_in++; if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue)) @@ -5686,7 +5660,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) out_remid: /* this is the new value returned to the lock holder for saving in its process-copy lkb */ - rl->rl_remid = cpu_to_le32(lkb->lkb_id); + *rl_remid = cpu_to_le32(lkb->lkb_id); lkb->lkb_recover_seq = ls->ls_recover_seq; @@ -5695,14 +5669,15 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc) put_rsb(r); out: if (error && error != -EEXIST) - log_debug(ls, "dlm_recover_master_copy remote %d %x error %d", + log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d", from_nodeid, remid, error); - rl->rl_result = cpu_to_le32(error); + *rl_result = cpu_to_le32(error); return error; } /* needs at least dlm_rcom + rcom_lock */ -int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) +int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, + uint64_t seq) { struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf; struct dlm_rsb *r; @@ -5717,7 +5692,8 @@ int 
dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) error = find_lkb(ls, lkid, &lkb); if (error) { log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d", - lkid, rc->rc_header.h_nodeid, remid, result); + lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, + result); return error; } @@ -5727,7 +5703,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) if (!is_process_copy(lkb)) { log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d", - lkid, rc->rc_header.h_nodeid, remid, result); + lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, + result); dlm_dump_rsb(r); unlock_rsb(r); put_rsb(r); @@ -5742,9 +5719,10 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) a barrier between recover_masters and recover_locks. */ log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d", - lkid, rc->rc_header.h_nodeid, remid, result); + lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, + result); - dlm_send_rcom_lock(r, lkb); + dlm_send_rcom_lock(r, lkb, seq); goto out; case -EEXIST: case 0: @@ -5752,7 +5730,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) break; default: log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk", - lkid, rc->rc_header.h_nodeid, remid, result); + lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid, + result); } /* an ack for dlm_recover_locks() which waits for replies from @@ -5767,11 +5746,11 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc) } int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, - int mode, uint32_t flags, void *name, unsigned int namelen, - unsigned long timeout_cs) + int mode, uint32_t flags, void *name, unsigned int namelen) { struct dlm_lkb *lkb; struct dlm_args args; + bool do_put = true; int error; dlm_lock_recovery(ls); @@ -5782,29 +5761,29 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, goto out; } + trace_dlm_lock_start(ls, lkb, name, namelen, mode, flags); + if (flags & DLM_LKF_VALBLK) { ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS); if (!ua->lksb.sb_lvbptr) { kfree(ua); - __put_lkb(ls, lkb); error = -ENOMEM; - goto out; + goto out_put; } } - - /* After ua is attached to lkb it will be freed by dlm_free_lkb(). - When DLM_IFL_USER is set, the dlm knows that this is a userspace - lock and that lkb_astparam is the dlm_user_args structure. */ - - error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs, - fake_astfn, ua, fake_bastfn, &args); - lkb->lkb_flags |= DLM_IFL_USER; - + error = set_lock_args(mode, &ua->lksb, flags, namelen, fake_astfn, ua, + fake_bastfn, &args); if (error) { - __put_lkb(ls, lkb); - goto out; + kfree(ua->lksb.sb_lvbptr); + ua->lksb.sb_lvbptr = NULL; + kfree(ua); + goto out_put; } + /* After ua is attached to lkb it will be freed by dlm_free_lkb(). + When DLM_DFL_USER_BIT is set, the dlm knows that this is a userspace + lock and that lkb_astparam is the dlm_user_args structure. 
*/ + set_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags); error = request_lock(ls, lkb, name, namelen, &args); switch (error) { @@ -5815,25 +5794,28 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, break; case -EAGAIN: error = 0; - /* fall through */ + fallthrough; default: - __put_lkb(ls, lkb); - goto out; + goto out_put; } /* add this new lkb to the per-process list of locks */ - spin_lock(&ua->proc->locks_spin); + spin_lock_bh(&ua->proc->locks_spin); hold_lkb(lkb); list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); - spin_unlock(&ua->proc->locks_spin); + spin_unlock_bh(&ua->proc->locks_spin); + do_put = false; + out_put: + trace_dlm_lock_end(ls, lkb, name, namelen, mode, flags, error, false); + if (do_put) + __put_lkb(ls, lkb); out: dlm_unlock_recovery(ls); return error; } int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, - int mode, uint32_t flags, uint32_t lkid, char *lvb_in, - unsigned long timeout_cs) + int mode, uint32_t flags, uint32_t lkid, char *lvb_in) { struct dlm_lkb *lkb; struct dlm_args args; @@ -5846,6 +5828,8 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error) goto out; + trace_dlm_lock_start(ls, lkb, NULL, 0, mode, flags); + /* user can change the params on its lock when it converts it, or add an lvb that didn't exist before */ @@ -5868,8 +5852,8 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, ua->bastaddr = ua_tmp->bastaddr; ua->user_lksb = ua_tmp->user_lksb; - error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs, - fake_astfn, ua, fake_bastfn, &args); + error = set_lock_args(mode, &ua->lksb, flags, 0, fake_astfn, ua, + fake_bastfn, &args); if (error) goto out_put; @@ -5878,6 +5862,7 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK) error = 0; out_put: + trace_dlm_lock_end(ls, lkb, NULL, 0, mode, flags, error, false); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); @@ -5885,6 +5870,77 @@ int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, return error; } +/* + * The caller asks for an orphan lock on a given resource with a given mode. + * If a matching lock exists, it's moved to the owner's list of locks and + * the lkid is returned. 
+ */ + +int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, + int mode, uint32_t flags, void *name, unsigned int namelen, + uint32_t *lkid) +{ + struct dlm_lkb *lkb = NULL, *iter; + struct dlm_user_args *ua; + int found_other_mode = 0; + int rv = 0; + + spin_lock_bh(&ls->ls_orphans_lock); + list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) { + if (iter->lkb_resource->res_length != namelen) + continue; + if (memcmp(iter->lkb_resource->res_name, name, namelen)) + continue; + if (iter->lkb_grmode != mode) { + found_other_mode = 1; + continue; + } + + lkb = iter; + list_del_init(&iter->lkb_ownqueue); + clear_bit(DLM_DFL_ORPHAN_BIT, &iter->lkb_dflags); + *lkid = iter->lkb_id; + break; + } + spin_unlock_bh(&ls->ls_orphans_lock); + + if (!lkb && found_other_mode) { + rv = -EAGAIN; + goto out; + } + + if (!lkb) { + rv = -ENOENT; + goto out; + } + + lkb->lkb_exflags = flags; + lkb->lkb_ownpid = (int) current->pid; + + ua = lkb->lkb_ua; + + ua->proc = ua_tmp->proc; + ua->xid = ua_tmp->xid; + ua->castparam = ua_tmp->castparam; + ua->castaddr = ua_tmp->castaddr; + ua->bastparam = ua_tmp->bastparam; + ua->bastaddr = ua_tmp->bastaddr; + ua->user_lksb = ua_tmp->user_lksb; + + /* + * The lkb reference from the ls_orphans list was not + * removed above, and is now considered the reference + * for the proc locks list. + */ + + spin_lock_bh(&ua->proc->locks_spin); + list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks); + spin_unlock_bh(&ua->proc->locks_spin); + out: + kfree(ua_tmp); + return rv; +} + int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid, char *lvb_in) { @@ -5899,6 +5955,8 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error) goto out; + trace_dlm_unlock_start(ls, lkb, flags); + ua = lkb->lkb_ua; if (lvb_in && ua->lksb.sb_lvbptr) @@ -5921,12 +5979,13 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error) goto out_put; - spin_lock(&ua->proc->locks_spin); + spin_lock_bh(&ua->proc->locks_spin); /* dlm_user_add_cb() may have already taken lkb off the proc list */ if (!list_empty(&lkb->lkb_ownqueue)) list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking); - spin_unlock(&ua->proc->locks_spin); + spin_unlock_bh(&ua->proc->locks_spin); out_put: + trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); @@ -5948,6 +6007,8 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error) goto out; + trace_dlm_unlock_start(ls, lkb, flags); + ua = lkb->lkb_ua; if (ua_tmp->castparam) ua->castparam = ua_tmp->castparam; @@ -5965,6 +6026,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, if (error == -EBUSY) error = 0; out_put: + trace_dlm_unlock_end(ls, lkb, flags, error); dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); @@ -5986,6 +6048,8 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) if (error) goto out; + trace_dlm_unlock_start(ls, lkb, flags); + ua = lkb->lkb_ua; error = set_unlock_args(flags, ua, &args); @@ -6001,7 +6065,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) error = validate_unlock_args(lkb, &args); if (error) goto out_r; - lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL; + set_bit(DLM_IFL_DEADLOCK_CANCEL_BIT, &lkb->lkb_iflags); error = _cancel_lock(r, lkb); out_r: @@ -6014,6 +6078,7 @@ int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid) if (error == -EBUSY) error = 0; out_put: + trace_dlm_unlock_end(ls, lkb, flags, error); 
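Both find_resend_waiter() above and dlm_user_adopt_orphan() below use the same shape: a NULL-initialized result pointer plus a separate loop cursor, so the cursor is never dereferenced after the loop ends. This matches the post-v5.17 list-iterator hardening, where the loop variable is undefined once the walk completes. A sketch, assuming the caller already holds ls_waiters_lock:

static struct dlm_lkb *find_first_resend(struct list_head *head)
{
	struct dlm_lkb *found = NULL, *iter;

	list_for_each_entry(iter, head, lkb_wait_reply) {
		if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
			hold_lkb(iter);	/* take a ref while still locked */
			found = iter;
			break;
		}
	}
	/* only 'found' is used past this point, never 'iter' */
	return found;
}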
dlm_put_lkb(lkb); out: dlm_unlock_recovery(ls); @@ -6028,10 +6093,10 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb) struct dlm_args args; int error; - hold_lkb(lkb); - mutex_lock(&ls->ls_orphans_mutex); + hold_lkb(lkb); /* reference for the ls_orphans list */ + spin_lock_bh(&ls->ls_orphans_lock); list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans); - mutex_unlock(&ls->ls_orphans_mutex); + spin_unlock_bh(&ls->ls_orphans_lock); set_unlock_args(0, lkb->lkb_ua, &args); @@ -6069,7 +6134,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, { struct dlm_lkb *lkb = NULL; - mutex_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); if (list_empty(&proc->locks)) goto out; @@ -6077,11 +6142,11 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, list_del_init(&lkb->lkb_ownqueue); if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) - lkb->lkb_flags |= DLM_IFL_ORPHAN; + set_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags); else - lkb->lkb_flags |= DLM_IFL_DEAD; + set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); out: - mutex_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); return lkb; } @@ -6097,6 +6162,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls, void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { + struct dlm_callback *cb, *cb_safe; struct dlm_lkb *lkb, *safe; dlm_lock_recovery(ls); @@ -6105,7 +6171,6 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) lkb = del_proc_lock(ls, proc); if (!lkb) break; - del_timeout(lkb); if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) orphan_proc_lock(ls, lkb); else @@ -6118,64 +6183,61 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) dlm_put_lkb(lkb); } - mutex_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); /* in-progress unlocks */ list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_del_init(&lkb->lkb_ownqueue); - lkb->lkb_flags |= DLM_IFL_DEAD; + set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); dlm_put_lkb(lkb); } - list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { - memset(&lkb->lkb_callbacks, 0, - sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE); - list_del_init(&lkb->lkb_cb_list); - dlm_put_lkb(lkb); + list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { + list_del(&cb->list); + dlm_free_cb(cb); } - mutex_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); dlm_unlock_recovery(ls); } static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc) { + struct dlm_callback *cb, *cb_safe; struct dlm_lkb *lkb, *safe; while (1) { lkb = NULL; - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); if (!list_empty(&proc->locks)) { lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue); list_del_init(&lkb->lkb_ownqueue); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); if (!lkb) break; - lkb->lkb_flags |= DLM_IFL_DEAD; + set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); unlock_proc_lock(ls, lkb); dlm_put_lkb(lkb); /* ref from proc->locks list */ } - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) { list_del_init(&lkb->lkb_ownqueue); - lkb->lkb_flags |= DLM_IFL_DEAD; + set_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags); dlm_put_lkb(lkb); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); - spin_lock(&proc->asts_spin); - list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) { - 
memset(&lkb->lkb_callbacks, 0, - sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE); - list_del_init(&lkb->lkb_cb_list); - dlm_put_lkb(lkb); + spin_lock_bh(&proc->asts_spin); + list_for_each_entry_safe(cb, cb_safe, &proc->asts, list) { + list_del(&cb->list); + dlm_free_cb(cb); } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); } /* pid of 0 means purge all orphans */ @@ -6184,7 +6246,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid) { struct dlm_lkb *lkb, *safe; - mutex_lock(&ls->ls_orphans_mutex); + spin_lock_bh(&ls->ls_orphans_lock); list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) { if (pid && lkb->lkb_ownpid != pid) continue; @@ -6192,7 +6254,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid) list_del_init(&lkb->lkb_ownqueue); dlm_put_lkb(lkb); } - mutex_unlock(&ls->ls_orphans_mutex); + spin_unlock_bh(&ls->ls_orphans_lock); } static int send_purge(struct dlm_ls *ls, int nodeid, int pid) @@ -6205,10 +6267,10 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid) DLM_MSG_PURGE, &ms, &mh); if (error) return error; - ms->m_nodeid = nodeid; - ms->m_pid = pid; + ms->m_nodeid = cpu_to_le32(nodeid); + ms->m_pid = cpu_to_le32(pid); - return send_message(mh, ms); + return send_message(mh, ms, NULL, 0); } int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, @@ -6216,7 +6278,7 @@ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, { int error = 0; - if (nodeid != dlm_our_nodeid()) { + if (nodeid && (nodeid != dlm_our_nodeid())) { error = send_purge(ls, nodeid, pid); } else { dlm_lock_recovery(ls); @@ -6229,3 +6291,64 @@ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, return error; } +/* debug functionality */ +int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len, + int lkb_nodeid, unsigned int lkb_dflags, int lkb_status) +{ + struct dlm_lksb *lksb; + struct dlm_lkb *lkb; + struct dlm_rsb *r; + int error; + + /* we currently can't set a valid user lock */ + if (lkb_dflags & BIT(DLM_DFL_USER_BIT)) + return -EOPNOTSUPP; + + lksb = kzalloc(sizeof(*lksb), GFP_NOFS); + if (!lksb) + return -ENOMEM; + + error = _create_lkb(ls, &lkb, lkb_id, lkb_id + 1); + if (error) { + kfree(lksb); + return error; + } + + dlm_set_dflags_val(lkb, lkb_dflags); + lkb->lkb_nodeid = lkb_nodeid; + lkb->lkb_lksb = lksb; + /* user specific pointer, just don't have it NULL for kernel locks */ + if (~lkb_dflags & BIT(DLM_DFL_USER_BIT)) + lkb->lkb_astparam = (void *)0xDEADBEEF; + + error = find_rsb(ls, name, len, 0, R_REQUEST, &r); + if (error) { + kfree(lksb); + __put_lkb(ls, lkb); + return error; + } + + lock_rsb(r); + attach_lkb(r, lkb); + add_lkb(r, lkb, lkb_status); + unlock_rsb(r); + put_rsb(r); + + return 0; +} + +int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id, + int mstype, int to_nodeid) +{ + struct dlm_lkb *lkb; + int error; + + error = find_lkb(ls, lkb_id, &lkb); + if (error) + return error; + + add_to_waiters(lkb, mstype, to_nodeid); + dlm_put_lkb(lkb); + return 0; +} + diff --git a/fs/dlm/lock.h b/fs/dlm/lock.h index 5e0c72e36a9b..b23d7b854ed4 100644 --- a/fs/dlm/lock.h +++ b/fs/dlm/lock.h @@ -1,11 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. 
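do_purge() above walks ls_orphans with list_for_each_entry_safe() because it unlinks entries mid-walk; the _safe variant caches the next node before the body runs. A generic sketch of the idiom with an illustrative struct, not dlm's:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	int pid;
};

static void purge_matching(struct list_head *head, int pid)
{
	struct item *it, *tmp;

	/* 'tmp' holds the next node, so 'it' may be freed here */
	list_for_each_entry_safe(it, tmp, head, link) {
		if (pid && it->pid != pid)
			continue;
		list_del_init(&it->link);
		kfree(it);
	}
}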
** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -14,41 +12,45 @@ #define __LOCK_DOT_H__ void dlm_dump_rsb(struct dlm_rsb *r); -void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len); +void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len); void dlm_print_lkb(struct dlm_lkb *lkb); -void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms, +void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms, uint32_t saved_seq); -void dlm_receive_buffer(union dlm_packet *p, int nodeid); +void dlm_receive_buffer(const union dlm_packet *p, int nodeid); int dlm_modes_compat(int mode1, int mode2); +void free_inactive_rsb(struct dlm_rsb *r); void dlm_put_rsb(struct dlm_rsb *r); void dlm_hold_rsb(struct dlm_rsb *r); int dlm_put_lkb(struct dlm_lkb *lkb); -void dlm_scan_rsbs(struct dlm_ls *ls); int dlm_lock_recovery_try(struct dlm_ls *ls); +void dlm_lock_recovery(struct dlm_ls *ls); void dlm_unlock_recovery(struct dlm_ls *ls); -void dlm_scan_waiters(struct dlm_ls *ls); -void dlm_scan_timeout(struct dlm_ls *ls); -void dlm_adjust_timeouts(struct dlm_ls *ls); -int dlm_master_lookup(struct dlm_ls *ls, int nodeid, char *name, int len, - unsigned int flags, int *r_nodeid, int *result); +void dlm_rsb_scan(struct timer_list *timer); +void resume_scan_timer(struct dlm_ls *ls); -int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len, +int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name, + int len, unsigned int flags, int *r_nodeid, int *result); + +int dlm_search_rsb_tree(struct rhashtable *rhash, const void *name, int len, struct dlm_rsb **r_ret); -void dlm_recover_purge(struct dlm_ls *ls); +void dlm_recover_purge(struct dlm_ls *ls, const struct list_head *root_list); void dlm_purge_mstcpy_locks(struct dlm_rsb *r); void dlm_recover_grant(struct dlm_ls *ls); int dlm_recover_waiters_post(struct dlm_ls *ls); void dlm_recover_waiters_pre(struct dlm_ls *ls); -int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc); -int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc); +int dlm_recover_master_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, + __le32 *rl_remid, __le32 *rl_result); +int dlm_recover_process_copy(struct dlm_ls *ls, const struct dlm_rcom *rc, + uint64_t seq); int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, int mode, - uint32_t flags, void *name, unsigned int namelen, - unsigned long timeout_cs); + uint32_t flags, void *name, unsigned int namelen); int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, - int mode, uint32_t flags, uint32_t lkid, char *lvb_in, - unsigned long timeout_cs); + int mode, uint32_t flags, uint32_t lkid, char *lvb_in); +int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, + int mode, uint32_t flags, void *name, unsigned int namelen, + uint32_t *lkid); int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, uint32_t flags, uint32_t lkid, char *lvb_in); int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp, @@ -57,20 +59,26 @@ int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc, int nodeid, int pid); int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t 
lkid); void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc); +int dlm_debug_add_lkb(struct dlm_ls *ls, uint32_t lkb_id, char *name, int len, + int lkb_nodeid, unsigned int lkb_flags, int lkb_status); +int dlm_debug_add_lkb_to_waiters(struct dlm_ls *ls, uint32_t lkb_id, + int mstype, int to_nodeid); static inline int is_master(struct dlm_rsb *r) { + WARN_ON_ONCE(r->res_nodeid == -1); + return !r->res_nodeid; } static inline void lock_rsb(struct dlm_rsb *r) { - mutex_lock(&r->res_mutex); + spin_lock_bh(&r->res_lock); } static inline void unlock_rsb(struct dlm_rsb *r) { - mutex_unlock(&r->res_mutex); + spin_unlock_bh(&r->res_lock); } #endif diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index 88556dc0458e..ddaa76558706 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -1,22 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ +#include <linux/module.h> + #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "recoverd.h" #include "dir.h" -#include "lowcomms.h" +#include "midcomms.h" #include "config.h" #include "memory.h" #include "lock.h" @@ -29,15 +29,16 @@ static int ls_count; static struct mutex ls_lock; static struct list_head lslist; static spinlock_t lslist_lock; -static struct task_struct * scand_task; - static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) { ssize_t ret = len; - int n = simple_strtol(buf, NULL, 0); + int n; + int rc = kstrtoint(buf, 0, &n); - ls = dlm_find_lockspace_local(ls->ls_local_handle); + if (rc) + return rc; + ls = dlm_find_lockspace_local(ls); if (!ls) return -EINVAL; @@ -57,7 +58,10 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len) static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len) { - ls->ls_uevent_result = simple_strtol(buf, NULL, 0); + int rc = kstrtoint(buf, 0, &ls->ls_uevent_result); + + if (rc) + return rc; set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags); wake_up(&ls->ls_uevent_wait); return len; @@ -70,7 +74,10 @@ static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf) static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len) { - ls->ls_global_id = simple_strtoul(buf, NULL, 0); + int rc = kstrtouint(buf, 0, &ls->ls_global_id); + + if (rc) + return rc; return len; } @@ -81,7 +88,11 @@ static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf) static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len) { - int val = simple_strtoul(buf, NULL, 0); + int val; + int rc = kstrtoint(buf, 0, &val); + + if (rc) + return rc; if (val == 1) set_bit(LSFL_NODIR, &ls->ls_flags); return len; @@ -145,6 +156,7 @@ static struct attribute *dlm_attrs[] = { &dlm_attr_recover_nodeid.attr, NULL, }; +ATTRIBUTE_GROUPS(dlm); static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) @@ -162,65 +174,52 @@ static ssize_t 
dlm_attr_store(struct kobject *kobj, struct attribute *attr, return a->store ? a->store(ls, buf, len) : len; } -static void lockspace_kobj_release(struct kobject *k) -{ - struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj); - kfree(ls); -} - static const struct sysfs_ops dlm_attr_ops = { .show = dlm_attr_show, .store = dlm_attr_store, }; static struct kobj_type dlm_ktype = { - .default_attrs = dlm_attrs, + .default_groups = dlm_groups, .sysfs_ops = &dlm_attr_ops, - .release = lockspace_kobj_release, }; static struct kset *dlm_kset; -static int do_uevent(struct dlm_ls *ls, int in) +static int do_uevent(struct dlm_ls *ls, int in, unsigned int release_recover) { - int error; + char message[512] = {}; + char *envp[] = { message, NULL }; - if (in) + if (in) { kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE); - else - kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE); + } else { + snprintf(message, 511, "RELEASE_RECOVER=%u", release_recover); + kobject_uevent_env(&ls->ls_kobj, KOBJ_OFFLINE, envp); + } - log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving"); + log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving"); /* dlm_controld will see the uevent, do the necessary group management and then write to sysfs to wake us */ - error = wait_event_interruptible(ls->ls_uevent_wait, - test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); - - log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result); + wait_event(ls->ls_uevent_wait, + test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags)); - if (error) - goto out; + log_rinfo(ls, "group event done %d", ls->ls_uevent_result); - error = ls->ls_uevent_result; - out: - if (error) - log_error(ls, "group %s failed %d %d", in ? "join" : "leave", - error, ls->ls_uevent_result); - return error; + return ls->ls_uevent_result; } -static int dlm_uevent(struct kset *kset, struct kobject *kobj, - struct kobj_uevent_env *env) +static int dlm_uevent(const struct kobject *kobj, struct kobj_uevent_env *env) { - struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); + const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj); add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name); return 0; } -static struct kset_uevent_ops dlm_uevent_ops = { +static const struct kset_uevent_ops dlm_uevent_ops = { .uevent = dlm_uevent, }; @@ -244,95 +243,29 @@ void dlm_lockspace_exit(void) kset_unregister(dlm_kset); } -static struct dlm_ls *find_ls_to_scan(void) -{ - struct dlm_ls *ls; - - spin_lock(&lslist_lock); - list_for_each_entry(ls, &lslist, ls_list) { - if (time_after_eq(jiffies, ls->ls_scan_time + - dlm_config.ci_scan_secs * HZ)) { - spin_unlock(&lslist_lock); - return ls; - } - } - spin_unlock(&lslist_lock); - return NULL; -} - -static int dlm_scand(void *data) -{ - struct dlm_ls *ls; - - while (!kthread_should_stop()) { - ls = find_ls_to_scan(); - if (ls) { - if (dlm_lock_recovery_try(ls)) { - ls->ls_scan_time = jiffies; - dlm_scan_rsbs(ls); - dlm_scan_timeout(ls); - dlm_scan_waiters(ls); - dlm_unlock_recovery(ls); - } else { - ls->ls_scan_time += HZ; - } - continue; - } - schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ); - } - return 0; -} - -static int dlm_scand_start(void) -{ - struct task_struct *p; - int error = 0; - - p = kthread_run(dlm_scand, NULL, "dlm_scand"); - if (IS_ERR(p)) - error = PTR_ERR(p); - else - scand_task = p; - return error; -} - -static void dlm_scand_stop(void) -{ - kthread_stop(scand_task); -} - struct dlm_ls *dlm_find_lockspace_global(uint32_t id) { struct dlm_ls *ls; - 
spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_global_id == id) { - ls->ls_count++; + atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace) { - struct dlm_ls *ls; + struct dlm_ls *ls = lockspace; - spin_lock(&lslist_lock); - list_for_each_entry(ls, &lslist, ls_list) { - if (ls->ls_local_handle == lockspace) { - ls->ls_count++; - goto out; - } - } - ls = NULL; - out: - spin_unlock(&lslist_lock); + atomic_inc(&ls->ls_count); return ls; } @@ -340,70 +273,89 @@ struct dlm_ls *dlm_find_lockspace_device(int minor) { struct dlm_ls *ls; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (ls->ls_device.minor == minor) { - ls->ls_count++; + atomic_inc(&ls->ls_count); goto out; } } ls = NULL; out: - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); return ls; } void dlm_put_lockspace(struct dlm_ls *ls) { - spin_lock(&lslist_lock); - ls->ls_count--; - spin_unlock(&lslist_lock); + if (atomic_dec_and_test(&ls->ls_count)) + wake_up(&ls->ls_count_wait); } static void remove_lockspace(struct dlm_ls *ls) { - for (;;) { - spin_lock(&lslist_lock); - if (ls->ls_count == 0) { - WARN_ON(ls->ls_create_count != 0); - list_del(&ls->ls_list); - spin_unlock(&lslist_lock); - return; - } - spin_unlock(&lslist_lock); - ssleep(1); +retry: + wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0); + + spin_lock_bh(&lslist_lock); + if (atomic_read(&ls->ls_count) != 0) { + spin_unlock_bh(&lslist_lock); + goto retry; } + + WARN_ON(ls->ls_create_count != 0); + list_del(&ls->ls_list); + spin_unlock_bh(&lslist_lock); } static int threads_start(void) { int error; - error = dlm_scand_start(); - if (error) { - log_print("cannot start dlm_scand thread %d", error); - goto fail; - } - /* Thread for sending/receiving messages for all lockspace's */ - error = dlm_lowcomms_start(); - if (error) { - log_print("cannot start dlm lowcomms %d", error); - goto scand_fail; - } + error = dlm_midcomms_start(); + if (error) + log_print("cannot start dlm midcomms %d", error); + + return error; +} +static int lkb_idr_free(struct dlm_lkb *lkb) +{ + if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) + dlm_free_lvb(lkb->lkb_lvbptr); + + dlm_free_lkb(lkb); return 0; +} - scand_fail: - dlm_scand_stop(); - fail: - return error; +static void rhash_free_rsb(void *ptr, void *arg) +{ + struct dlm_rsb *rsb = ptr; + + dlm_free_rsb(rsb); } -static void threads_stop(void) +static void free_lockspace(struct work_struct *work) { - dlm_scand_stop(); - dlm_lowcomms_stop(); + struct dlm_ls *ls = container_of(work, struct dlm_ls, ls_free_work); + struct dlm_lkb *lkb; + unsigned long id; + + /* + * Free all lkb's in xa + */ + xa_for_each(&ls->ls_lkbxa, id, lkb) { + lkb_idr_free(lkb); + } + xa_destroy(&ls->ls_lkbxa); + + /* + * Free all rsb's on rsbtbl + */ + rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL); + + kfree(ls); } static int new_lockspace(const char *name, const char *cluster, @@ -412,14 +364,13 @@ static int new_lockspace(const char *name, const char *cluster, int *ops_result, dlm_lockspace_t **lockspace) { struct dlm_ls *ls; - int i, size, error; - int do_unreg = 0; int namelen = strlen(name); + int error; - if (namelen > DLM_LOCKSPACE_LEN) + if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL; - if (!lvblen || (lvblen % 8)) + if (lvblen 
% 8) return -EINVAL; if (!try_module_get(THIS_MODULE)) @@ -438,9 +389,14 @@ static int new_lockspace(const char *name, const char *cluster, *ops_result = 0; } + if (!cluster) + log_print("dlm cluster name '%s' is being used without an application provided cluster name", + dlm_config.ci_cluster_name); + if (dlm_config.ci_recover_callbacks && cluster && strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) { - log_print("dlm cluster name %s mismatch %s", + log_print("dlm cluster name '%s' does not match " + "the application cluster name '%s'", dlm_config.ci_cluster_name, cluster); error = -EBADR; goto out; @@ -448,7 +404,7 @@ static int new_lockspace(const char *name, const char *cluster, error = 0; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { WARN_ON(ls->ls_create_count <= 0); if (ls->ls_namelen != namelen) @@ -464,69 +420,52 @@ static int new_lockspace(const char *name, const char *cluster, error = 1; break; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (error) goto out; error = -ENOMEM; - ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS); + ls = kzalloc(sizeof(*ls), GFP_NOFS); if (!ls) goto out; memcpy(ls->ls_name, name, namelen); ls->ls_namelen = namelen; ls->ls_lvblen = lvblen; - ls->ls_count = 0; + atomic_set(&ls->ls_count, 0); + init_waitqueue_head(&ls->ls_count_wait); ls->ls_flags = 0; - ls->ls_scan_time = jiffies; if (ops && dlm_config.ci_recover_callbacks) { ls->ls_ops = ops; ls->ls_ops_arg = ops_arg; } - if (flags & DLM_LSFL_TIMEWARN) - set_bit(LSFL_TIMEWARN, &ls->ls_flags); + if (flags & DLM_LSFL_SOFTIRQ) + set_bit(LSFL_SOFTIRQ, &ls->ls_flags); /* ls_exflags are forced to match among nodes, and we don't - need to require all nodes to have some flags set */ - ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS | - DLM_LSFL_NEWEXCL)); + * need to require all nodes to have some flags set + */ + ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL | + DLM_LSFL_SOFTIRQ)); - size = dlm_config.ci_rsbtbl_size; - ls->ls_rsbtbl_size = size; + INIT_LIST_HEAD(&ls->ls_slow_inactive); + INIT_LIST_HEAD(&ls->ls_slow_active); + rwlock_init(&ls->ls_rsbtbl_lock); - ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size); - if (!ls->ls_rsbtbl) + error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params); + if (error) goto out_lsfree; - for (i = 0; i < size; i++) { - ls->ls_rsbtbl[i].keep.rb_node = NULL; - ls->ls_rsbtbl[i].toss.rb_node = NULL; - spin_lock_init(&ls->ls_rsbtbl[i].lock); - } - spin_lock_init(&ls->ls_remove_spin); - - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { - ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1, - GFP_KERNEL); - if (!ls->ls_remove_names[i]) - goto out_rsbtbl; - } - - idr_init(&ls->ls_lkbidr); - spin_lock_init(&ls->ls_lkbidr_spin); + xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); + rwlock_init(&ls->ls_lkbxa_lock); INIT_LIST_HEAD(&ls->ls_waiters); - mutex_init(&ls->ls_waiters_mutex); + spin_lock_init(&ls->ls_waiters_lock); INIT_LIST_HEAD(&ls->ls_orphans); - mutex_init(&ls->ls_orphans_mutex); - INIT_LIST_HEAD(&ls->ls_timeout); - mutex_init(&ls->ls_timeout_mutex); - - INIT_LIST_HEAD(&ls->ls_new_rsb); - spin_lock_init(&ls->ls_new_rsb_spin); + spin_lock_init(&ls->ls_orphans_lock); INIT_LIST_HEAD(&ls->ls_nodes); INIT_LIST_HEAD(&ls->ls_nodes_gone); @@ -535,37 +474,46 @@ static int new_lockspace(const char *name, const char *cluster, ls->ls_total_weight = 0; ls->ls_node_array = NULL; - memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb)); - 
ls->ls_stub_rsb.res_ls = ls; + memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb)); + ls->ls_local_rsb.res_ls = ls; ls->ls_debug_rsb_dentry = NULL; ls->ls_debug_waiters_dentry = NULL; init_waitqueue_head(&ls->ls_uevent_wait); ls->ls_uevent_result = 0; - init_completion(&ls->ls_members_done); - ls->ls_members_result = -1; + init_completion(&ls->ls_recovery_done); + ls->ls_recovery_result = -1; - mutex_init(&ls->ls_cb_mutex); + spin_lock_init(&ls->ls_cb_lock); INIT_LIST_HEAD(&ls->ls_cb_delay); + INIT_WORK(&ls->ls_free_work, free_lockspace); + ls->ls_recoverd_task = NULL; mutex_init(&ls->ls_recoverd_active); spin_lock_init(&ls->ls_recover_lock); spin_lock_init(&ls->ls_rcom_spin); get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t)); ls->ls_recover_status = 0; - ls->ls_recover_seq = 0; + ls->ls_recover_seq = get_random_u64(); ls->ls_recover_args = NULL; init_rwsem(&ls->ls_in_recovery); - init_rwsem(&ls->ls_recv_active); + rwlock_init(&ls->ls_recv_active); INIT_LIST_HEAD(&ls->ls_requestqueue); - mutex_init(&ls->ls_requestqueue_mutex); - mutex_init(&ls->ls_clear_proc_locks); + rwlock_init(&ls->ls_requestqueue_lock); + spin_lock_init(&ls->ls_clear_proc_locks); - ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS); - if (!ls->ls_recover_buf) - goto out_lkbidr; + /* Due backwards compatibility with 3.1 we need to use maximum + * possible dlm message size to be sure the message will fit and + * not having out of bounds issues. However on sending side 3.2 + * might send less. + */ + ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); + if (!ls->ls_recover_buf) { + error = -ENOMEM; + goto out_lkbxa; + } ls->ls_slot = 0; ls->ls_num_slots = 0; @@ -574,25 +522,31 @@ static int new_lockspace(const char *name, const char *cluster, INIT_LIST_HEAD(&ls->ls_recover_list); spin_lock_init(&ls->ls_recover_list_lock); - idr_init(&ls->ls_recover_idr); - spin_lock_init(&ls->ls_recover_idr_lock); + xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH); + spin_lock_init(&ls->ls_recover_xa_lock); ls->ls_recover_list_count = 0; - ls->ls_local_handle = ls; init_waitqueue_head(&ls->ls_wait_general); - INIT_LIST_HEAD(&ls->ls_root_list); - init_rwsem(&ls->ls_root_sem); + INIT_LIST_HEAD(&ls->ls_masters_list); + rwlock_init(&ls->ls_masters_lock); + INIT_LIST_HEAD(&ls->ls_dir_dump_list); + rwlock_init(&ls->ls_dir_dump_lock); + + INIT_LIST_HEAD(&ls->ls_scan_list); + spin_lock_init(&ls->ls_scan_lock); + timer_setup(&ls->ls_scan_timer, dlm_rsb_scan, TIMER_DEFERRABLE); - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); ls->ls_create_count = 1; list_add(&ls->ls_list, &lslist); - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); - if (flags & DLM_LSFL_FS) { - error = dlm_callback_start(ls); - if (error) { - log_error(ls, "can't start dlm_callback %d", error); - goto out_delist; - } + if (flags & DLM_LSFL_FS) + set_bit(LSFL_FS, &ls->ls_flags); + + error = dlm_callback_start(ls); + if (error) { + log_error(ls, "can't start dlm_callback %d", error); + goto out_delist; } init_waitqueue_head(&ls->ls_recover_lock_wait); @@ -620,32 +574,30 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) 
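new_lockspace() initializes ls_recovery_done here and, after the join uevent below, blocks in wait_for_completion() until dlm_recoverd posts a result. A minimal sketch of that completion handshake, with 'recovery_result' standing in for ls_recovery_result:

#include <linux/completion.h>

static DECLARE_COMPLETION(recovery_done);
static int recovery_result;

/* recovery-thread side: publish the outcome, then wake the waiter */
static void recovery_finished(int error)
{
	recovery_result = error;
	complete(&recovery_done);
}

/* join side: sleeps until recovery_finished() runs */
static int wait_for_first_recovery(void)
{
	wait_for_completion(&recovery_done);
	return recovery_result;
}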
Once it's done that, it tells us who the current lockspace members are (via configfs) and then tells the lockspace to start running (via sysfs) in dlm_ls_start(). */ - error = do_uevent(ls, 1); - if (error) + error = do_uevent(ls, 1, 0); + if (error < 0) goto out_recoverd; - wait_for_completion(&ls->ls_members_done); - error = ls->ls_members_result; + /* wait until recovery is successful or failed */ + wait_for_completion(&ls->ls_recovery_done); + error = ls->ls_recovery_result; if (error) goto out_members; dlm_create_debug_file(ls); - log_debug(ls, "join complete"); + log_rinfo(ls, "join complete"); *lockspace = ls; return 0; out_members: - do_uevent(ls, 0); + do_uevent(ls, 0, 0); dlm_clear_members(ls); kfree(ls->ls_node_array); out_recoverd: @@ -653,33 +605,27 @@ static int new_lockspace(const char *name, const char *cluster, out_callback: dlm_callback_stop(ls); out_delist: - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_del(&ls->ls_list); - spin_unlock(&lslist_lock); - idr_destroy(&ls->ls_recover_idr); + spin_unlock_bh(&lslist_lock); + xa_destroy(&ls->ls_recover_xa); kfree(ls->ls_recover_buf); - out_lkbidr: - idr_destroy(&ls->ls_lkbidr); - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) { - if (ls->ls_remove_names[i]) - kfree(ls->ls_remove_names[i]); - } - out_rsbtbl: - vfree(ls->ls_rsbtbl); + out_lkbxa: + xa_destroy(&ls->ls_lkbxa); + rhashtable_destroy(&ls->ls_rsbtbl); out_lsfree: - if (do_unreg) - kobject_put(&ls->ls_kobj); - else - kfree(ls); + kobject_put(&ls->ls_kobj); + kfree(ls); out: module_put(THIS_MODULE); return error; } -int dlm_new_lockspace(const char *name, const char *cluster, - uint32_t flags, int lvblen, - const struct dlm_lockspace_ops *ops, void *ops_arg, - int *ops_result, dlm_lockspace_t **lockspace) +static int __dlm_new_lockspace(const char *name, const char *cluster, + uint32_t flags, int lvblen, + const struct dlm_lockspace_ops *ops, + void *ops_arg, int *ops_result, + dlm_lockspace_t **lockspace) { int error = 0; @@ -695,67 +641,76 @@ int dlm_new_lockspace(const char *name, const char *cluster, ls_count++; if (error > 0) error = 0; - if (!ls_count) - threads_stop(); + if (!ls_count) { + dlm_midcomms_shutdown(); + dlm_midcomms_stop(); + } out: mutex_unlock(&ls_lock); return error; } -static int lkb_idr_is_local(int id, void *p, void *data) +int dlm_new_lockspace(const char *name, const char *cluster, uint32_t flags, + int lvblen, const struct dlm_lockspace_ops *ops, + void *ops_arg, int *ops_result, + dlm_lockspace_t **lockspace) { - struct dlm_lkb *lkb = p; - - if (!lkb->lkb_nodeid) - return 1; - return 0; + return __dlm_new_lockspace(name, cluster, flags | DLM_LSFL_FS, lvblen, + ops, ops_arg, ops_result, lockspace); } -static int lkb_idr_is_any(int id, void *p, void *data) +int dlm_new_user_lockspace(const char *name, const char *cluster, + uint32_t flags, int lvblen, + const struct dlm_lockspace_ops *ops, + void *ops_arg, int *ops_result, + dlm_lockspace_t **lockspace) { - return 1; -} - -static int lkb_idr_free(int id, void *p, void *data) -{ - struct dlm_lkb *lkb = p; - - if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY) - dlm_free_lvb(lkb->lkb_lvbptr); + if (flags & DLM_LSFL_SOFTIRQ) + return -EINVAL; - dlm_free_lkb(lkb); - return 0; + return __dlm_new_lockspace(name, cluster, flags, lvblen, ops, + ops_arg, ops_result, lockspace); } -/* NOTE: We check the lkbidr here rather than the resource table. +/* NOTE: We check the lkbxa here rather than the resource table. 
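The out_members/out_recoverd/out_callback/out_delist/out_lkbxa/out_lsfree labels in new_lockspace() tear resources down in strict reverse order of setup, each failure jumping past its own cleanup. A compact sketch of the idiom with hypothetical setup_*/teardown_* steps:

static int setup_a(void), setup_b(void), setup_c(void);	/* hypothetical */
static void teardown_a(void), teardown_b(void);		/* hypothetical */

static int bring_up(void)
{
	int error;

	error = setup_a();
	if (error)
		goto out;
	error = setup_b();
	if (error)
		goto out_a;
	error = setup_c();
	if (error)
		goto out_b;
	return 0;

out_b:
	teardown_b();	/* undo only what already succeeded */
out_a:
	teardown_a();
out:
	return error;
}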
This is because there may be LKBs queued as ASTs that have been unlinked from their RSBs and are pending deletion once the AST has been delivered */ -static int lockspace_busy(struct dlm_ls *ls, int force) +static int lockspace_busy(struct dlm_ls *ls, unsigned int release_option) { - int rv; + struct dlm_lkb *lkb; + unsigned long id; + int rv = 0; - spin_lock(&ls->ls_lkbidr_spin); - if (force == 0) { - rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls); - } else if (force == 1) { - rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls); + read_lock_bh(&ls->ls_lkbxa_lock); + if (release_option == DLM_RELEASE_NO_LOCKS) { + xa_for_each(&ls->ls_lkbxa, id, lkb) { + rv = 1; + break; + } + } else if (release_option == DLM_RELEASE_UNUSED) { + /* TODO: handle this UNUSED option as NO_LOCKS in later patch */ + xa_for_each(&ls->ls_lkbxa, id, lkb) { + if (lkb->lkb_nodeid == 0 && + lkb->lkb_grmode != DLM_LOCK_IV) { + rv = 1; + break; + } + } } else { rv = 0; } - spin_unlock(&ls->ls_lkbidr_spin); + read_unlock_bh(&ls->ls_lkbxa_lock); return rv; } -static int release_lockspace(struct dlm_ls *ls, int force) +static int release_lockspace(struct dlm_ls *ls, unsigned int release_option) { - struct dlm_rsb *rsb; - struct rb_node *n; - int i, busy, rv; + int busy, rv; - busy = lockspace_busy(ls, force); + busy = lockspace_busy(ls, release_option); - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); if (ls->ls_create_count == 1) { if (busy) { rv = -EBUSY; @@ -769,64 +724,45 @@ static int release_lockspace(struct dlm_ls *ls, int force) } else { rv = -EINVAL; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (rv) { log_debug(ls, "release_lockspace no remove %d", rv); return rv; } + if (ls_count == 1) + dlm_midcomms_version_wait(); + dlm_device_deregister(ls); - if (force < 3 && dlm_user_daemon_available()) - do_uevent(ls, 0); + if (release_option != DLM_RELEASE_NO_EVENT && + dlm_user_daemon_available()) + do_uevent(ls, 0, (release_option == DLM_RELEASE_RECOVER)); dlm_recoverd_stop(ls); - dlm_callback_stop(ls); - - remove_lockspace(ls); - - dlm_delete_debug_file(ls); - - kfree(ls->ls_recover_buf); - - /* - * Free all lkb's in idr + /* clear the LSFL_RUNNING flag to fast up + * time_shutdown_sync(), we don't care anymore */ + clear_bit(LSFL_RUNNING, &ls->ls_flags); + timer_shutdown_sync(&ls->ls_scan_timer); - idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls); - idr_destroy(&ls->ls_lkbidr); - - /* - * Free all rsb's on rsbtbl[] lists - */ + if (ls_count == 1) { + dlm_clear_members(ls); + dlm_midcomms_shutdown(); + } - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) { - rsb = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].keep); - dlm_free_rsb(rsb); - } + dlm_callback_stop(ls); - while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) { - rsb = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].toss); - dlm_free_rsb(rsb); - } - } + remove_lockspace(ls); - vfree(ls->ls_rsbtbl); + dlm_delete_debug_file(ls); - for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) - kfree(ls->ls_remove_names[i]); + kobject_put(&ls->ls_kobj); - while (!list_empty(&ls->ls_new_rsb)) { - rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, - res_hashchain); - list_del(&rsb->res_hashchain); - dlm_free_rsb(rsb); - } + xa_destroy(&ls->ls_recover_xa); + kfree(ls->ls_recover_buf); /* * Free structures on any other lists @@ -837,10 +773,11 @@ static int release_lockspace(struct dlm_ls *ls, int force) dlm_clear_members(ls); 
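lockspace_busy() above answers "are there any lkbs left?" by scanning the xarray under ls_lkbxa_lock and bailing out at the first hit. A sketch of that early-exit scan; xa_for_each() expands to a for loop, so returning from its body is fine:

#include <linux/xarray.h>

static bool xa_has_any(struct xarray *xa)
{
	struct dlm_lkb *lkb;
	unsigned long id;

	/* caller holds the read lock protecting the xarray contents,
	 * as lockspace_busy() does with ls_lkbxa_lock */
	xa_for_each(xa, id, lkb)
		return true;	/* first entry is enough */
	return false;
}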
dlm_clear_members_gone(ls); kfree(ls->ls_node_array); - log_debug(ls, "release_lockspace final free"); - kobject_put(&ls->ls_kobj); - /* The ls structure will be freed when the kobject is done with */ + log_rinfo(ls, "%s final free", __func__); + + /* delayed free of data structures, see free_lockspace() */ + queue_work(dlm_wq, &ls->ls_free_work); module_put(THIS_MODULE); return 0; } @@ -852,29 +789,28 @@ static int release_lockspace(struct dlm_ls *ls, int force) * lockspace must continue to function as usual, participating in recoveries, * until this returns. * - * Force has 4 possible values: - * 0 - don't destroy locksapce if it has any LKBs - * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs - * 2 - destroy lockspace regardless of LKBs - * 3 - destroy lockspace as part of a forced shutdown + * See DLM_RELEASE defines for release_option values and their meaning. */ -int dlm_release_lockspace(void *lockspace, int force) +int dlm_release_lockspace(void *lockspace, unsigned int release_option) { struct dlm_ls *ls; int error; + if (release_option > __DLM_RELEASE_MAX) + return -EINVAL; + ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; dlm_put_lockspace(ls); mutex_lock(&ls_lock); - error = release_lockspace(ls, force); + error = release_lockspace(ls, release_option); if (!error) ls_count--; if (!ls_count) - threads_stop(); + dlm_midcomms_stop(); mutex_unlock(&ls_lock); return error; @@ -887,20 +823,19 @@ void dlm_stop_lockspaces(void) restart: count = 0; - spin_lock(&lslist_lock); + spin_lock_bh(&lslist_lock); list_for_each_entry(ls, &lslist, ls_list) { if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) { count++; continue; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); log_error(ls, "no userland control daemon, stopping lockspace"); dlm_ls_stop(ls); goto restart; } - spin_unlock(&lslist_lock); + spin_unlock_bh(&lslist_lock); if (count) log_print("dlm user daemon left %d lockspaces", count); } - diff --git a/fs/dlm/lockspace.h b/fs/dlm/lockspace.h index f879f87901f8..47ebd4411926 100644 --- a/fs/dlm/lockspace.h +++ b/fs/dlm/lockspace.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -14,6 +12,14 @@ #ifndef __LOCKSPACE_DOT_H__ #define __LOCKSPACE_DOT_H__ +/* DLM_LSFL_FS + * The lockspace user is in the kernel (i.e. filesystem). Enables + * direct bast/cast callbacks.
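+ * dlm_new_lockspace() ORs this flag in unconditionally for kernel
+ * callers; user lockspaces created via dlm_new_user_lockspace() do
+ * not set it.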
+ * + * internal lockspace flag - will be removed in the future + */ +#define DLM_LSFL_FS 0x00000004 + int dlm_lockspace_init(void); void dlm_lockspace_exit(void); struct dlm_ls *dlm_find_lockspace_global(uint32_t id); @@ -21,6 +27,11 @@ struct dlm_ls *dlm_find_lockspace_local(void *id); struct dlm_ls *dlm_find_lockspace_device(int minor); void dlm_put_lockspace(struct dlm_ls *ls); void dlm_stop_lockspaces(void); +int dlm_new_user_lockspace(const char *name, const char *cluster, + uint32_t flags, int lvblen, + const struct dlm_lockspace_ops *ops, + void *ops_arg, int *ops_result, + dlm_lockspace_t **lockspace); #endif /* __LOCKSPACE_DOT_H__ */ diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index d90909ec6aa6..b3958008ba3f 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -55,80 +53,75 @@ #include <net/sctp/sctp.h> #include <net/ipv6.h> +#include <trace/events/dlm.h> +#include <trace/events/sock.h> + #include "dlm_internal.h" #include "lowcomms.h" #include "midcomms.h" +#include "memory.h" #include "config.h" +#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000) +#define DLM_MAX_PROCESS_BUFFERS 24 #define NEEDED_RMEM (4*1024*1024) -#define CONN_HASH_SIZE 32 - -/* Number of messages to send before rescheduling */ -#define MAX_SEND_MSG_COUNT 25 - -struct cbuf { - unsigned int base; - unsigned int len; - unsigned int mask; -}; - -static void cbuf_add(struct cbuf *cb, int n) -{ - cb->len += n; -} - -static int cbuf_data(struct cbuf *cb) -{ - return ((cb->base + cb->len) & cb->mask); -} - -static void cbuf_init(struct cbuf *cb, int size) -{ - cb->base = cb->len = 0; - cb->mask = size-1; -} - -static void cbuf_eat(struct cbuf *cb, int n) -{ - cb->len -= n; - cb->base += n; - cb->base &= cb->mask; -} - -static bool cbuf_empty(struct cbuf *cb) -{ - return cb->len == 0; -} struct connection { struct socket *sock; /* NULL if not connected */ uint32_t nodeid; /* So we know who we are in the list */ - struct mutex sock_mutex; + /* this semaphore is used to allow parallel recv/send in read + * lock mode. When we release a sock we need to hold the write lock. + * + * However, this locking scheme is not nice. When we remove the + * othercon handling we can look into other mechanisms to synchronize + * io handling and call sock_release() at the right time.
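+ *
+ * In practice the recv/send paths take this rwsem in read mode so
+ * they can run in parallel, while paths that release or replace
+ * con->sock (close_connection(), stop_connection_io()) take it in
+ * write mode first.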
+ */ + struct rw_semaphore sock_lock; unsigned long flags; -#define CF_READ_PENDING 1 -#define CF_WRITE_PENDING 2 -#define CF_CONNECT_PENDING 3 -#define CF_INIT_PENDING 4 +#define CF_APP_LIMITED 0 +#define CF_RECV_PENDING 1 +#define CF_SEND_PENDING 2 +#define CF_RECV_INTR 3 +#define CF_IO_STOP 4 #define CF_IS_OTHERCON 5 -#define CF_CLOSE 6 -#define CF_APP_LIMITED 7 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; - int (*rx_action) (struct connection *); /* What to do when active */ - void (*connect_action) (struct connection *); /* What to do to connect */ - struct page *rx_page; - struct cbuf cb; int retries; -#define MAX_CONNECT_RETRIES 3 - int sctp_assoc; struct hlist_node list; + /* due to some connect()/accept() races we can currently end up with + * a second, cross-over connection attempt for one node. + * + * There is a way to avoid the race by introducing a connect + * rule, e.g. only the side with our_nodeid > nodeid_to_connect is + * allowed to connect. The other side could still connect, but that + * would only be taken to mean it wants a reconnect. + * + * However, changing to this behaviour would break backwards + * compatibility. In a DLM protocol major version upgrade we should + * remove this! + */ struct connection *othercon; - struct work_struct rwork; /* Receive workqueue */ - struct work_struct swork; /* Send workqueue */ - bool try_new_addr; + struct work_struct rwork; /* receive worker */ + struct work_struct swork; /* send worker */ + wait_queue_head_t shutdown_wait; + unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE]; + int rx_leftover; + int mark; + int addr_count; + int curr_addr_index; + struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT]; + spinlock_t addrs_lock; + struct rcu_head rcu; }; #define sock2con(x) ((struct connection *)(x)->sk_user_data) +struct listen_connection { + struct socket *sock; + struct work_struct rwork; +}; + +#define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end) +#define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset) + /* An entry waiting to be sent */ struct writequeue_entry { struct list_head list; @@ -137,153 +130,209 @@ struct writequeue_entry { int len; int end; int users; + bool dirty; struct connection *con; + struct list_head msgs; + struct kref ref; }; -struct dlm_node_addr { +struct dlm_msg { + struct writequeue_entry *entry; + struct dlm_msg *orig_msg; + bool retransmit; + void *ppc; + int len; + int idx; /* new()/commit() idx exchange */ + struct list_head list; + struct kref ref; +}; + +struct processqueue_entry { + unsigned char *buf; int nodeid; - int addr_count; - int curr_addr_index; - struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; + int buflen; + + struct list_head list; +}; + +struct dlm_proto_ops { + bool try_new_addr; + const char *name; + int proto; + int how; + + void (*sockopts)(struct socket *sock); + int (*bind)(struct socket *sock); + int (*listen_validate)(void); + void (*listen_sockopts)(struct socket *sock); + int (*listen_bind)(struct socket *sock); }; -static LIST_HEAD(dlm_node_addrs); -static DEFINE_SPINLOCK(dlm_node_addrs_spin); +static struct listen_sock_callbacks { + void (*sk_error_report)(struct sock *); + void (*sk_data_ready)(struct sock *); + void (*sk_state_change)(struct sock *); + void (*sk_write_space)(struct sock *); +} listen_sock; -static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT]; +static struct listen_connection listen_con; +static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT]; static int dlm_local_count; -static int
dlm_allow_conn; /* Work queues */ -static struct workqueue_struct *recv_workqueue; -static struct workqueue_struct *send_workqueue; +static struct workqueue_struct *io_workqueue; +static struct workqueue_struct *process_workqueue; static struct hlist_head connection_hash[CONN_HASH_SIZE]; -static DEFINE_MUTEX(connections_lock); -static struct kmem_cache *con_cache; +static DEFINE_SPINLOCK(connections_lock); +DEFINE_STATIC_SRCU(connections_srcu); + +static const struct dlm_proto_ops *dlm_proto_ops; + +#define DLM_IO_SUCCESS 0 +#define DLM_IO_END 1 +#define DLM_IO_EOF 2 +#define DLM_IO_RESCHED 3 +#define DLM_IO_FLUSH 4 static void process_recv_sockets(struct work_struct *work); static void process_send_sockets(struct work_struct *work); +static void process_dlm_messages(struct work_struct *work); +static DECLARE_WORK(process_work, process_dlm_messages); +static DEFINE_SPINLOCK(processqueue_lock); +static bool process_dlm_messages_pending; +static DECLARE_WAIT_QUEUE_HEAD(processqueue_wq); +static atomic_t processqueue_count; +static LIST_HEAD(processqueue); -/* This is deliberately very simple because most clusters have simple - sequential nodeids, so we should be able to go straight to a connection - struct in the array */ -static inline int nodeid_hash(int nodeid) +bool dlm_lowcomms_is_running(void) { - return nodeid & (CONN_HASH_SIZE-1); + return !!listen_con.sock; } -static struct connection *__find_con(int nodeid) +static void lowcomms_queue_swork(struct connection *con) { - int r; - struct connection *con; + assert_spin_locked(&con->writequeue_lock); - r = nodeid_hash(nodeid); - - hlist_for_each_entry(con, &connection_hash[r], list) { - if (con->nodeid == nodeid) - return con; - } - return NULL; + if (!test_bit(CF_IO_STOP, &con->flags) && + !test_bit(CF_APP_LIMITED, &con->flags) && + !test_and_set_bit(CF_SEND_PENDING, &con->flags)) + queue_work(io_workqueue, &con->swork); } -/* - * If 'allocation' is zero then we don't attempt to create a new - * connection structure for this node. 
- */ -static struct connection *__nodeid2con(int nodeid, gfp_t alloc) +static void lowcomms_queue_rwork(struct connection *con) { - struct connection *con = NULL; - int r; - - con = __find_con(nodeid); - if (con || !alloc) - return con; +#ifdef CONFIG_LOCKDEP + WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk)); +#endif - con = kmem_cache_zalloc(con_cache, alloc); - if (!con) - return NULL; - - r = nodeid_hash(nodeid); - hlist_add_head(&con->list, &connection_hash[r]); - - con->nodeid = nodeid; - mutex_init(&con->sock_mutex); - INIT_LIST_HEAD(&con->writequeue); - spin_lock_init(&con->writequeue_lock); - INIT_WORK(&con->swork, process_send_sockets); - INIT_WORK(&con->rwork, process_recv_sockets); - - /* Setup action pointers for child sockets */ - if (con->nodeid) { - struct connection *zerocon = __find_con(0); + if (!test_bit(CF_IO_STOP, &con->flags) && + !test_and_set_bit(CF_RECV_PENDING, &con->flags)) + queue_work(io_workqueue, &con->rwork); +} - con->connect_action = zerocon->connect_action; - if (!con->rx_action) - con->rx_action = zerocon->rx_action; - } +static void writequeue_entry_ctor(void *data) +{ + struct writequeue_entry *entry = data; - return con; + INIT_LIST_HEAD(&entry->msgs); } -/* Loop round all connections */ -static void foreach_conn(void (*conn_func)(struct connection *c)) +struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void) { - int i; - struct hlist_node *n; - struct connection *con; + return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry), + 0, 0, writequeue_entry_ctor); +} - for (i = 0; i < CONN_HASH_SIZE; i++) { - hlist_for_each_entry_safe(con, n, &connection_hash[i], list) - conn_func(con); - } +struct kmem_cache *dlm_lowcomms_msg_cache_create(void) +{ + return KMEM_CACHE(dlm_msg, 0); } -static struct connection *nodeid2con(int nodeid, gfp_t allocation) +/* must be called with writequeue_lock held */ +static struct writequeue_entry *con_next_wq(struct connection *con) { - struct connection *con; + struct writequeue_entry *e; - mutex_lock(&connections_lock); - con = __nodeid2con(nodeid, allocation); - mutex_unlock(&connections_lock); + e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry, + list); + /* if len is zero there is nothing to send; if there are users still + * filling buffers we wait until they are done so we can send more.
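+ *
+ * a NULL return makes send_to_sock() clear CF_SEND_PENDING and park
+ * the send worker until new data is queued.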
+ */ + if (!e || e->users || e->len == 0) + return NULL; - return con; + return e; } -/* This is a bit drastic, but only called when things go wrong */ -static struct connection *assoc2con(int assoc_id) +static struct connection *__find_con(int nodeid, int r) { - int i; struct connection *con; - mutex_lock(&connections_lock); - - for (i = 0 ; i < CONN_HASH_SIZE; i++) { - hlist_for_each_entry(con, &connection_hash[i], list) { - if (con->sctp_assoc == assoc_id) { - mutex_unlock(&connections_lock); - return con; - } - } + hlist_for_each_entry_rcu(con, &connection_hash[r], list) { + if (con->nodeid == nodeid) + return con; } - mutex_unlock(&connections_lock); + return NULL; } -static struct dlm_node_addr *find_node_addr(int nodeid) +static void dlm_con_init(struct connection *con, int nodeid) +{ + con->nodeid = nodeid; + init_rwsem(&con->sock_lock); + INIT_LIST_HEAD(&con->writequeue); + spin_lock_init(&con->writequeue_lock); + INIT_WORK(&con->swork, process_send_sockets); + INIT_WORK(&con->rwork, process_recv_sockets); + spin_lock_init(&con->addrs_lock); + init_waitqueue_head(&con->shutdown_wait); +} + +/* + * If 'allocation' is zero then we don't attempt to create a new + * connection structure for this node. + */ +static struct connection *nodeid2con(int nodeid, gfp_t alloc) { - struct dlm_node_addr *na; + struct connection *con, *tmp; + int r; - list_for_each_entry(na, &dlm_node_addrs, list) { - if (na->nodeid == nodeid) - return na; + r = nodeid_hash(nodeid); + con = __find_con(nodeid, r); + if (con || !alloc) + return con; + + con = kzalloc(sizeof(*con), alloc); + if (!con) + return NULL; + + dlm_con_init(con, nodeid); + + spin_lock(&connections_lock); + /* Because multiple workqueues/threads call this function it can + * race on multiple CPUs. Instead of taking a lock in the hot path + * __find_con(), we just recheck for recently added nodes under + * protection of connections_lock. If this is the case we + * abort our connection creation and return the existing connection.
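+ *
+ * plain lookups stay lockless: they walk connection_hash under
+ * connections_srcu, and entries are only added with
+ * hlist_add_head_rcu() below while connections_lock is held.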
+ */ + tmp = __find_con(nodeid, r); + if (tmp) { + spin_unlock(&connections_lock); + kfree(con); + return tmp; } - return NULL; + + hlist_add_head_rcu(&con->list, &connection_hash[r]); + spin_unlock(&connections_lock); + + return con; } -static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y) +static int addr_compare(const struct sockaddr_storage *x, + const struct sockaddr_storage *y) { switch (x->ss_family) { case AF_INET: { @@ -311,41 +360,51 @@ static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y) } static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, - struct sockaddr *sa_out, bool try_new_addr) + struct sockaddr *sa_out, bool try_new_addr, + unsigned int *mark) { struct sockaddr_storage sas; - struct dlm_node_addr *na; + struct connection *con; + int idx; if (!dlm_local_count) return -1; - spin_lock(&dlm_node_addrs_spin); - na = find_node_addr(nodeid); - if (na && na->addr_count) { - if (try_new_addr) { - na->curr_addr_index++; - if (na->curr_addr_index == na->addr_count) - na->curr_addr_index = 0; - } + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, 0); + if (!con) { + srcu_read_unlock(&connections_srcu, idx); + return -ENOENT; + } - memcpy(&sas, na->addr[na->curr_addr_index ], - sizeof(struct sockaddr_storage)); + spin_lock(&con->addrs_lock); + if (!con->addr_count) { + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); + return -ENOENT; } - spin_unlock(&dlm_node_addrs_spin); - if (!na) - return -EEXIST; + memcpy(&sas, &con->addr[con->curr_addr_index], + sizeof(struct sockaddr_storage)); - if (!na->addr_count) - return -ENOENT; + if (try_new_addr) { + con->curr_addr_index++; + if (con->curr_addr_index == con->addr_count) + con->curr_addr_index = 0; + } + + *mark = con->mark; + spin_unlock(&con->addrs_lock); if (sas_out) memcpy(sas_out, &sas, sizeof(struct sockaddr_storage)); - if (!sa_out) + if (!sa_out) { + srcu_read_unlock(&connections_srcu, idx); return 0; + } - if (dlm_local_addr[0]->ss_family == AF_INET) { + if (dlm_local_addr[0].ss_family == AF_INET) { struct sockaddr_in *in4 = (struct sockaddr_in *) &sas; struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out; ret4->sin_addr.s_addr = in4->sin_addr.s_addr; @@ -355,594 +414,619 @@ static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out, ret6->sin6_addr = in6->sin6_addr; } + srcu_read_unlock(&connections_srcu, idx); return 0; } -static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid) +static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid, + unsigned int *mark) { - struct dlm_node_addr *na; - int rv = -EEXIST; - int addr_i; - - spin_lock(&dlm_node_addrs_spin); - list_for_each_entry(na, &dlm_node_addrs, list) { - if (!na->addr_count) - continue; + struct connection *con; + int i, idx, addr_i; - for (addr_i = 0; addr_i < na->addr_count; addr_i++) { - if (addr_compare(na->addr[addr_i], addr)) { - *nodeid = na->nodeid; - rv = 0; - goto unlock; + idx = srcu_read_lock(&connections_srcu); + for (i = 0; i < CONN_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(con, &connection_hash[i], list) { + WARN_ON_ONCE(!con->addr_count); + + spin_lock(&con->addrs_lock); + for (addr_i = 0; addr_i < con->addr_count; addr_i++) { + if (addr_compare(&con->addr[addr_i], addr)) { + *nodeid = con->nodeid; + *mark = con->mark; + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); + return 0; + } } + spin_unlock(&con->addrs_lock); } } -unlock: - spin_unlock(&dlm_node_addrs_spin); - 
return rv; + srcu_read_unlock(&connections_srcu, idx); + + return -ENOENT; } -int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len) +static bool dlm_lowcomms_con_has_addr(const struct connection *con, + const struct sockaddr_storage *addr) { - struct sockaddr_storage *new_addr; - struct dlm_node_addr *new_node, *na; + int i; - new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS); - if (!new_node) - return -ENOMEM; + for (i = 0; i < con->addr_count; i++) { + if (addr_compare(&con->addr[i], addr)) + return true; + } + + return false; +} - new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS); - if (!new_addr) { - kfree(new_node); +int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr) +{ + struct connection *con; + bool ret; + int idx; + + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, GFP_NOFS); + if (!con) { + srcu_read_unlock(&connections_srcu, idx); return -ENOMEM; } - memcpy(new_addr, addr, len); - - spin_lock(&dlm_node_addrs_spin); - na = find_node_addr(nodeid); - if (!na) { - new_node->nodeid = nodeid; - new_node->addr[0] = new_addr; - new_node->addr_count = 1; - list_add(&new_node->list, &dlm_node_addrs); - spin_unlock(&dlm_node_addrs_spin); + spin_lock(&con->addrs_lock); + if (!con->addr_count) { + memcpy(&con->addr[0], addr, sizeof(*addr)); + con->addr_count = 1; + con->mark = dlm_config.ci_mark; + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); return 0; } - if (na->addr_count >= DLM_MAX_ADDR_COUNT) { - spin_unlock(&dlm_node_addrs_spin); - kfree(new_addr); - kfree(new_node); + ret = dlm_lowcomms_con_has_addr(con, addr); + if (ret) { + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); + return -EEXIST; + } + + if (con->addr_count >= DLM_MAX_ADDR_COUNT) { + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); return -ENOSPC; } - na->addr[na->addr_count++] = new_addr; - spin_unlock(&dlm_node_addrs_spin); - kfree(new_node); + memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr)); + srcu_read_unlock(&connections_srcu, idx); + spin_unlock(&con->addrs_lock); return 0; } /* Data available on socket or listen socket received a connect */ -static void lowcomms_data_ready(struct sock *sk, int count_unused) +static void lowcomms_data_ready(struct sock *sk) { struct connection *con = sock2con(sk); - if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags)) - queue_work(recv_workqueue, &con->rwork); + + trace_sk_data_ready(sk); + + set_bit(CF_RECV_INTR, &con->flags); + lowcomms_queue_rwork(con); } static void lowcomms_write_space(struct sock *sk) { struct connection *con = sock2con(sk); - if (!con) - return; - clear_bit(SOCK_NOSPACE, &con->sock->flags); + spin_lock_bh(&con->writequeue_lock); if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { con->sock->sk->sk_write_pending--; - clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); + clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); } - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) - queue_work(send_workqueue, &con->swork); + lowcomms_queue_swork(con); + spin_unlock_bh(&con->writequeue_lock); } -static inline void lowcomms_connect_sock(struct connection *con) +static void lowcomms_state_change(struct sock *sk) { - if (test_bit(CF_CLOSE, &con->flags)) - return; - if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags)) - queue_work(send_workqueue, &con->swork); + /* SCTP layer is not calling sk_data_ready when the connection + * is done, so we catch the signal through here. 
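+ *
+ * add_sock() installs this callback for SCTP sockets only; TCP keeps
+ * its default sk_state_change.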
+ */ + if (sk->sk_shutdown & RCV_SHUTDOWN) + lowcomms_data_ready(sk); } -static void lowcomms_state_change(struct sock *sk) +static void lowcomms_listen_data_ready(struct sock *sk) { - if (sk->sk_state == TCP_ESTABLISHED) - lowcomms_write_space(sk); + trace_sk_data_ready(sk); + + queue_work(io_workqueue, &listen_con.rwork); } int dlm_lowcomms_connect_node(int nodeid) { struct connection *con; + int idx; - /* with sctp there's no connecting without sending */ - if (dlm_config.ci_protocol != 0) - return 0; + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, 0); + if (WARN_ON_ONCE(!con)) { + srcu_read_unlock(&connections_srcu, idx); + return -ENOENT; + } - if (nodeid == dlm_our_nodeid()) - return 0; + down_read(&con->sock_lock); + if (!con->sock) { + spin_lock_bh(&con->writequeue_lock); + lowcomms_queue_swork(con); + spin_unlock_bh(&con->writequeue_lock); + } + up_read(&con->sock_lock); + srcu_read_unlock(&connections_srcu, idx); - con = nodeid2con(nodeid, GFP_NOFS); - if (!con) - return -ENOMEM; - lowcomms_connect_sock(con); + cond_resched(); return 0; } +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark) +{ + struct connection *con; + int idx; + + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, 0); + if (!con) { + srcu_read_unlock(&connections_srcu, idx); + return -ENOENT; + } + + spin_lock(&con->addrs_lock); + con->mark = mark; + spin_unlock(&con->addrs_lock); + srcu_read_unlock(&connections_srcu, idx); + return 0; +} + +static void lowcomms_error_report(struct sock *sk) +{ + struct connection *con = sock2con(sk); + struct inet_sock *inet; + + inet = inet_sk(sk); + switch (sk->sk_family) { + case AF_INET: + printk_ratelimited(KERN_ERR "dlm: node %d: socket error " + "sending to node %d at %pI4, dport %d, " + "sk_err=%d/%d\n", dlm_our_nodeid(), + con->nodeid, &inet->inet_daddr, + ntohs(inet->inet_dport), sk->sk_err, + READ_ONCE(sk->sk_err_soft)); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + printk_ratelimited(KERN_ERR "dlm: node %d: socket error " + "sending to node %d at %pI6c, " + "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(), + con->nodeid, &sk->sk_v6_daddr, + ntohs(inet->inet_dport), sk->sk_err, + READ_ONCE(sk->sk_err_soft)); + break; +#endif + default: + printk_ratelimited(KERN_ERR "dlm: node %d: socket error " + "invalid socket family %d set, " + "sk_err=%d/%d\n", dlm_our_nodeid(), + sk->sk_family, sk->sk_err, + READ_ONCE(sk->sk_err_soft)); + break; + } + + dlm_midcomms_unack_msg_resend(con->nodeid); + + listen_sock.sk_error_report(sk); +} + +static void restore_callbacks(struct sock *sk) +{ +#ifdef CONFIG_LOCKDEP + WARN_ON_ONCE(!lockdep_sock_is_held(sk)); +#endif + + sk->sk_user_data = NULL; + sk->sk_data_ready = listen_sock.sk_data_ready; + sk->sk_state_change = listen_sock.sk_state_change; + sk->sk_write_space = listen_sock.sk_write_space; + sk->sk_error_report = listen_sock.sk_error_report; +} + /* Make a socket active */ static void add_sock(struct socket *sock, struct connection *con) { + struct sock *sk = sock->sk; + + lock_sock(sk); con->sock = sock; - /* Install a data_ready callback */ - con->sock->sk->sk_data_ready = lowcomms_data_ready; - con->sock->sk->sk_write_space = lowcomms_write_space; - con->sock->sk->sk_state_change = lowcomms_state_change; - con->sock->sk->sk_user_data = con; - con->sock->sk->sk_allocation = GFP_NOFS; + sk->sk_user_data = con; + sk->sk_data_ready = lowcomms_data_ready; + sk->sk_write_space = lowcomms_write_space; + if (dlm_config.ci_protocol == DLM_PROTO_SCTP) + sk->sk_state_change = 
lowcomms_state_change; + sk->sk_allocation = GFP_NOFS; + sk->sk_use_task_frag = false; + sk->sk_error_report = lowcomms_error_report; + release_sock(sk); } /* Add the port number to an IPv6 or 4 sockaddr and return the address length */ -static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port, +static void make_sockaddr(struct sockaddr_storage *saddr, __be16 port, int *addr_len) { - saddr->ss_family = dlm_local_addr[0]->ss_family; + saddr->ss_family = dlm_local_addr[0].ss_family; if (saddr->ss_family == AF_INET) { struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr; - in4_addr->sin_port = cpu_to_be16(port); + in4_addr->sin_port = port; *addr_len = sizeof(struct sockaddr_in); memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero)); } else { struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr; - in6_addr->sin6_port = cpu_to_be16(port); + in6_addr->sin6_port = port; *addr_len = sizeof(struct sockaddr_in6); } memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len); } -/* Close a remote connection and tidy up */ -static void close_connection(struct connection *con, bool and_other) +static void dlm_page_release(struct kref *kref) { - mutex_lock(&con->sock_mutex); + struct writequeue_entry *e = container_of(kref, struct writequeue_entry, + ref); - if (con->sock) { - sock_release(con->sock); - con->sock = NULL; - } - if (con->othercon && and_other) { - /* Will only re-enter once. */ - close_connection(con->othercon, false); - } - if (con->rx_page) { - __free_page(con->rx_page); - con->rx_page = NULL; - } - - con->retries = 0; - mutex_unlock(&con->sock_mutex); + __free_page(e->page); + dlm_free_writequeue(e); } -/* We only send shutdown messages to nodes that are not part of the cluster */ -static void sctp_send_shutdown(sctp_assoc_t associd) +static void dlm_msg_release(struct kref *kref) { - static char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))]; - struct msghdr outmessage; - struct cmsghdr *cmsg; - struct sctp_sndrcvinfo *sinfo; - int ret; - struct connection *con; + struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref); - con = nodeid2con(0,0); - BUG_ON(con == NULL); + kref_put(&msg->entry->ref, dlm_page_release); + dlm_free_msg(msg); +} + +static void free_entry(struct writequeue_entry *e) +{ + struct dlm_msg *msg, *tmp; - outmessage.msg_name = NULL; - outmessage.msg_namelen = 0; - outmessage.msg_control = outcmsg; - outmessage.msg_controllen = sizeof(outcmsg); - outmessage.msg_flags = MSG_EOR; + list_for_each_entry_safe(msg, tmp, &e->msgs, list) { + if (msg->orig_msg) { + msg->orig_msg->retransmit = false; + kref_put(&msg->orig_msg->ref, dlm_msg_release); + } - cmsg = CMSG_FIRSTHDR(&outmessage); - cmsg->cmsg_level = IPPROTO_SCTP; - cmsg->cmsg_type = SCTP_SNDRCV; - cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); - outmessage.msg_controllen = cmsg->cmsg_len; - sinfo = CMSG_DATA(cmsg); - memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); + list_del(&msg->list); + kref_put(&msg->ref, dlm_msg_release); + } - sinfo->sinfo_flags |= MSG_EOF; - sinfo->sinfo_assoc_id = associd; + list_del(&e->list); + kref_put(&e->ref, dlm_page_release); +} - ret = kernel_sendmsg(con->sock, &outmessage, NULL, 0, 0); +static void dlm_close_sock(struct socket **sock) +{ + lock_sock((*sock)->sk); + restore_callbacks((*sock)->sk); + release_sock((*sock)->sk); - if (ret != 0) - log_print("send EOF to node failed: %d", ret); + sock_release(*sock); + *sock = NULL; } -static void sctp_init_failed_foreach(struct connection *con) +static 
void allow_connection_io(struct connection *con) { + if (con->othercon) + clear_bit(CF_IO_STOP, &con->othercon->flags); + clear_bit(CF_IO_STOP, &con->flags); +} - /* - * Don't try to recover base con and handle race where the - * other node's assoc init creates a assoc and we get that - * notification, then we get a notification that our attempt - * failed due. This happens when we are still trying the primary - * address, but the other node has already tried secondary addrs - * and found one that worked. - */ - if (!con->nodeid || con->sctp_assoc) - return; +static void stop_connection_io(struct connection *con) +{ + if (con->othercon) + stop_connection_io(con->othercon); - log_print("Retrying SCTP association init for node %d\n", con->nodeid); + spin_lock_bh(&con->writequeue_lock); + set_bit(CF_IO_STOP, &con->flags); + spin_unlock_bh(&con->writequeue_lock); - con->try_new_addr = true; - con->sctp_assoc = 0; - if (test_and_clear_bit(CF_INIT_PENDING, &con->flags)) { - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) - queue_work(send_workqueue, &con->swork); + down_write(&con->sock_lock); + if (con->sock) { + lock_sock(con->sock->sk); + restore_callbacks(con->sock->sk); + release_sock(con->sock->sk); } + up_write(&con->sock_lock); + + cancel_work_sync(&con->swork); + cancel_work_sync(&con->rwork); } -/* INIT failed but we don't know which node... - restart INIT on all pending nodes */ -static void sctp_init_failed(void) +/* Close a remote connection and tidy up */ +static void close_connection(struct connection *con, bool and_other) { - mutex_lock(&connections_lock); + struct writequeue_entry *e; + + if (con->othercon && and_other) + close_connection(con->othercon, false); - foreach_conn(sctp_init_failed_foreach); + down_write(&con->sock_lock); + if (!con->sock) { + up_write(&con->sock_lock); + return; + } + + dlm_close_sock(&con->sock); + + /* if we sent a writequeue entry only half way, we drop the + * whole entry on reconnection so that we never resume in the + * middle of a msg, which would confuse the other end. + * + * we can always drop messages because of retransmits, but what we + * cannot allow is to transmit half messages which may be processed + * at the other side. + * + * our policy is to start from a clean state on disconnect; we don't + * know what was sent/received on the transport layer in this case.
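+ *
+ * only the first entry can be partially transmitted (e->dirty, set in
+ * writequeue_entry_complete()); entries behind it stay queued and go
+ * out once the connection is re-established.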
+ */ + spin_lock_bh(&con->writequeue_lock); + if (!list_empty(&con->writequeue)) { + e = list_first_entry(&con->writequeue, struct writequeue_entry, + list); + if (e->dirty) + free_entry(e); + } + spin_unlock_bh(&con->writequeue_lock); - mutex_unlock(&connections_lock); + con->rx_leftover = 0; + con->retries = 0; + clear_bit(CF_APP_LIMITED, &con->flags); + clear_bit(CF_RECV_PENDING, &con->flags); + clear_bit(CF_SEND_PENDING, &con->flags); + up_write(&con->sock_lock); } -static void retry_failed_sctp_send(struct connection *recv_con, - struct sctp_send_failed *sn_send_failed, - char *buf) +static void shutdown_connection(struct connection *con, bool and_other) { - int len = sn_send_failed->ssf_length - sizeof(struct sctp_send_failed); - struct dlm_mhandle *mh; - struct connection *con; - char *retry_buf; - int nodeid = sn_send_failed->ssf_info.sinfo_ppid; + int ret; - log_print("Retry sending %d bytes to node id %d", len, nodeid); + if (con->othercon && and_other) + shutdown_connection(con->othercon, false); - con = nodeid2con(nodeid, 0); - if (!con) { - log_print("Could not look up con for nodeid %d\n", - nodeid); + flush_workqueue(io_workqueue); + down_read(&con->sock_lock); + /* nothing to shutdown */ + if (!con->sock) { + up_read(&con->sock_lock); return; } - mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &retry_buf); - if (!mh) { - log_print("Could not allocate buf for retry."); - return; + ret = kernel_sock_shutdown(con->sock, dlm_proto_ops->how); + up_read(&con->sock_lock); + if (ret) { + log_print("Connection %p failed to shutdown: %d will force close", + con, ret); + goto force_close; + } else { + ret = wait_event_timeout(con->shutdown_wait, !con->sock, + DLM_SHUTDOWN_WAIT_TIMEOUT); + if (ret == 0) { + log_print("Connection %p shutdown timed out, will force close", + con); + goto force_close; + } } - memcpy(retry_buf, buf + sizeof(struct sctp_send_failed), len); - dlm_lowcomms_commit_buffer(mh); - /* - * If we got a assoc changed event before the send failed event then - * we only need to retry the send. - */ - if (con->sctp_assoc) { - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) - queue_work(send_workqueue, &con->swork); - } else - sctp_init_failed_foreach(con); + return; + +force_close: + close_connection(con, false); } -/* Something happened to an association */ -static void process_sctp_notification(struct connection *con, - struct msghdr *msg, char *buf) +static struct processqueue_entry *new_processqueue_entry(int nodeid, + int buflen) { - union sctp_notification *sn = (union sctp_notification *)buf; + struct processqueue_entry *pentry; - switch (sn->sn_header.sn_type) { - case SCTP_SEND_FAILED: - retry_failed_sctp_send(con, &sn->sn_send_failed, buf); - break; - case SCTP_ASSOC_CHANGE: - switch (sn->sn_assoc_change.sac_state) { - case SCTP_COMM_UP: - case SCTP_RESTART: - { - /* Check that the new node is in the lockspace */ - struct sctp_prim prim; - int nodeid; - int prim_len, ret; - int addr_len; - struct connection *new_con; - - /* - * We get this before any data for an association. - * We verify that the node is in the cluster and - * then peel off a socket for it. 
- */ - if ((int)sn->sn_assoc_change.sac_assoc_id <= 0) { - log_print("COMM_UP for invalid assoc ID %d", - (int)sn->sn_assoc_change.sac_assoc_id); - sctp_init_failed(); - return; - } - memset(&prim, 0, sizeof(struct sctp_prim)); - prim_len = sizeof(struct sctp_prim); - prim.ssp_assoc_id = sn->sn_assoc_change.sac_assoc_id; - - ret = kernel_getsockopt(con->sock, - IPPROTO_SCTP, - SCTP_PRIMARY_ADDR, - (char*)&prim, - &prim_len); - if (ret < 0) { - log_print("getsockopt/sctp_primary_addr on " - "new assoc %d failed : %d", - (int)sn->sn_assoc_change.sac_assoc_id, - ret); - - /* Retry INIT later */ - new_con = assoc2con(sn->sn_assoc_change.sac_assoc_id); - if (new_con) - clear_bit(CF_CONNECT_PENDING, &con->flags); - return; - } - make_sockaddr(&prim.ssp_addr, 0, &addr_len); - if (addr_to_nodeid(&prim.ssp_addr, &nodeid)) { - unsigned char *b=(unsigned char *)&prim.ssp_addr; - log_print("reject connect from unknown addr"); - print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, - b, sizeof(struct sockaddr_storage)); - sctp_send_shutdown(prim.ssp_assoc_id); - return; - } + pentry = kmalloc(sizeof(*pentry), GFP_NOFS); + if (!pentry) + return NULL; - new_con = nodeid2con(nodeid, GFP_NOFS); - if (!new_con) - return; + pentry->buf = kmalloc(buflen, GFP_NOFS); + if (!pentry->buf) { + kfree(pentry); + return NULL; + } - /* Peel off a new sock */ - sctp_lock_sock(con->sock->sk); - ret = sctp_do_peeloff(con->sock->sk, - sn->sn_assoc_change.sac_assoc_id, - &new_con->sock); - sctp_release_sock(con->sock->sk); - if (ret < 0) { - log_print("Can't peel off a socket for " - "connection %d to node %d: err=%d", - (int)sn->sn_assoc_change.sac_assoc_id, - nodeid, ret); - return; - } - add_sock(new_con->sock, new_con); - - log_print("connecting to %d sctp association %d", - nodeid, (int)sn->sn_assoc_change.sac_assoc_id); - - new_con->sctp_assoc = sn->sn_assoc_change.sac_assoc_id; - new_con->try_new_addr = false; - /* Send any pending writes */ - clear_bit(CF_CONNECT_PENDING, &new_con->flags); - clear_bit(CF_INIT_PENDING, &new_con->flags); - if (!test_and_set_bit(CF_WRITE_PENDING, &new_con->flags)) { - queue_work(send_workqueue, &new_con->swork); - } - if (!test_and_set_bit(CF_READ_PENDING, &new_con->flags)) - queue_work(recv_workqueue, &new_con->rwork); - } - break; + pentry->nodeid = nodeid; + return pentry; +} - case SCTP_COMM_LOST: - case SCTP_SHUTDOWN_COMP: - { - con = assoc2con(sn->sn_assoc_change.sac_assoc_id); - if (con) { - con->sctp_assoc = 0; - } - } - break; +static void free_processqueue_entry(struct processqueue_entry *pentry) +{ + kfree(pentry->buf); + kfree(pentry); +} - case SCTP_CANT_STR_ASSOC: - { - /* Will retry init when we get the send failed notification */ - log_print("Can't start SCTP association - retrying"); - } - break; +static void process_dlm_messages(struct work_struct *work) +{ + struct processqueue_entry *pentry; + + spin_lock_bh(&processqueue_lock); + pentry = list_first_entry_or_null(&processqueue, + struct processqueue_entry, list); + if (WARN_ON_ONCE(!pentry)) { + process_dlm_messages_pending = false; + spin_unlock_bh(&processqueue_lock); + return; + } - default: - log_print("unexpected SCTP assoc change id=%d state=%d", - (int)sn->sn_assoc_change.sac_assoc_id, - sn->sn_assoc_change.sac_state); + list_del(&pentry->list); + if (atomic_dec_and_test(&processqueue_count)) + wake_up(&processqueue_wq); + spin_unlock_bh(&processqueue_lock); + + for (;;) { + dlm_process_incoming_buffer(pentry->nodeid, pentry->buf, + pentry->buflen); + free_processqueue_entry(pentry); + + 
spin_lock_bh(&processqueue_lock); + pentry = list_first_entry_or_null(&processqueue, + struct processqueue_entry, list); + if (!pentry) { + process_dlm_messages_pending = false; + spin_unlock_bh(&processqueue_lock); + break; } - default: - ; /* fall through */ + + list_del(&pentry->list); + if (atomic_dec_and_test(&processqueue_count)) + wake_up(&processqueue_wq); + spin_unlock_bh(&processqueue_lock); } } /* Data received from remote end */ -static int receive_from_sock(struct connection *con) +static int receive_from_sock(struct connection *con, int buflen) { - int ret = 0; - struct msghdr msg = {}; - struct kvec iov[2]; - unsigned len; - int r; - int call_again_soon = 0; - int nvec; - char incmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))]; - - mutex_lock(&con->sock_mutex); + struct processqueue_entry *pentry; + int ret, buflen_real; + struct msghdr msg; + struct kvec iov; - if (con->sock == NULL) { - ret = -EAGAIN; - goto out_close; - } - - if (con->rx_page == NULL) { - /* - * This doesn't need to be atomic, but I think it should - * improve performance if it is. - */ - con->rx_page = alloc_page(GFP_ATOMIC); - if (con->rx_page == NULL) - goto out_resched; - cbuf_init(&con->cb, PAGE_CACHE_SIZE); - } + pentry = new_processqueue_entry(con->nodeid, buflen); + if (!pentry) + return DLM_IO_RESCHED; - /* Only SCTP needs these really */ - memset(&incmsg, 0, sizeof(incmsg)); - msg.msg_control = incmsg; - msg.msg_controllen = sizeof(incmsg); + memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover); - /* - * iov[0] is the bit of the circular buffer between the current end - * point (cb.base + cb.len) and the end of the buffer. - */ - iov[0].iov_len = con->cb.base - cbuf_data(&con->cb); - iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb); - iov[1].iov_len = 0; - nvec = 1; - - /* - * iov[1] is the bit of the circular buffer between the start of the - * buffer and the start of the currently used section (cb.base) + /* calculate new buffer parameters based on the last receive and + * possible leftover bytes */ - if (cbuf_data(&con->cb) >= con->cb.base) { - iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); - iov[1].iov_len = con->cb.base; - iov[1].iov_base = page_address(con->rx_page); - nvec = 2; - } - len = iov[0].iov_len + iov[1].iov_len; - - r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len, - MSG_DONTWAIT | MSG_NOSIGNAL); - if (ret <= 0) - goto out_close; - - /* Process SCTP notifications */ - if (msg.msg_flags & MSG_NOTIFICATION) { - msg.msg_control = incmsg; - msg.msg_controllen = sizeof(incmsg); - - process_sctp_notification(con, &msg, - page_address(con->rx_page) + con->cb.base); - mutex_unlock(&con->sock_mutex); - return 0; - } - BUG_ON(con->nodeid == 0); - - if (ret == len) - call_again_soon = 1; - cbuf_add(&con->cb, ret); - ret = dlm_process_incoming_buffer(con->nodeid, - page_address(con->rx_page), - con->cb.base, con->cb.len, - PAGE_CACHE_SIZE); - if (ret == -EBADMSG) { - log_print("lowcomms: addr=%p, base=%u, len=%u, " - "iov_len=%u, iov_base[0]=%p, read=%d", - page_address(con->rx_page), con->cb.base, con->cb.len, - len, iov[0].iov_base, r); - } - if (ret < 0) - goto out_close; - cbuf_eat(&con->cb, ret); + iov.iov_base = pentry->buf + con->rx_leftover; + iov.iov_len = buflen - con->rx_leftover; + + memset(&msg, 0, sizeof(msg)); + msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; + clear_bit(CF_RECV_INTR, &con->flags); +again: + ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len, + msg.msg_flags); + trace_dlm_recv(con->nodeid, ret); + if (ret == -EAGAIN)
{ + lock_sock(con->sock->sk); + if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) { + release_sock(con->sock->sk); + goto again; + } - if (cbuf_empty(&con->cb) && !call_again_soon) { - __free_page(con->rx_page); - con->rx_page = NULL; + clear_bit(CF_RECV_PENDING, &con->flags); + release_sock(con->sock->sk); + free_processqueue_entry(pentry); + return DLM_IO_END; + } else if (ret == 0) { + /* close will clear CF_RECV_PENDING */ + free_processqueue_entry(pentry); + return DLM_IO_EOF; + } else if (ret < 0) { + free_processqueue_entry(pentry); + return ret; + } + + /* new buflen according to the bytes read plus leftover from the last receive */ + buflen_real = ret + con->rx_leftover; + ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf, + buflen_real); + if (ret < 0) { + free_processqueue_entry(pentry); + return ret; } - if (call_again_soon) - goto out_resched; - mutex_unlock(&con->sock_mutex); - return 0; + pentry->buflen = ret; -out_resched: - if (!test_and_set_bit(CF_READ_PENDING, &con->flags)) - queue_work(recv_workqueue, &con->rwork); - mutex_unlock(&con->sock_mutex); - return -EAGAIN; + /* move the leftover bytes from processing to the beginning of + * the receive buffer, so on the next receive the full message + * is at the start address of the receive buffer. + */ + con->rx_leftover = buflen_real - ret; + memmove(con->rx_leftover_buf, pentry->buf + ret, + con->rx_leftover); -out_close: - mutex_unlock(&con->sock_mutex); - if (ret != -EAGAIN) { - close_connection(con, false); - /* Reconnect when there is something to send */ + spin_lock_bh(&processqueue_lock); + ret = atomic_inc_return(&processqueue_count); + list_add_tail(&pentry->list, &processqueue); + if (!process_dlm_messages_pending) { + process_dlm_messages_pending = true; + queue_work(process_workqueue, &process_work); } - /* Don't return success if we really got EOF */ - if (ret == 0) - ret = -EAGAIN; + spin_unlock_bh(&processqueue_lock); + + if (ret > DLM_MAX_PROCESS_BUFFERS) + return DLM_IO_FLUSH; - return ret; + return DLM_IO_SUCCESS; } /* Listening socket is busy, accept a connection */ -static int tcp_accept_from_sock(struct connection *con) +static int accept_from_sock(void) { - int result; struct sockaddr_storage peeraddr; - struct socket *newsock; - int len; - int nodeid; + int len, idx, result, nodeid; struct connection *newcon; - struct connection *addcon; - - mutex_lock(&connections_lock); - if (!dlm_allow_conn) { - mutex_unlock(&connections_lock); - return -1; - } - mutex_unlock(&connections_lock); - - memset(&peeraddr, 0, sizeof(peeraddr)); - result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, - IPPROTO_TCP, &newsock); - if (result < 0) - return -ENOMEM; - - mutex_lock_nested(&con->sock_mutex, 0); - - result = -ENOTCONN; - if (con->sock == NULL) - goto accept_err; - - newsock->type = con->sock->type; - newsock->ops = con->sock->ops; + struct socket *newsock; + unsigned int mark; - result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK); - if (result < 0) + result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK); + if (result == -EAGAIN) + return DLM_IO_END; + else if (result < 0) goto accept_err; /* Get the connected socket's peer */ memset(&peeraddr, 0, sizeof(peeraddr)); - if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, - &len, 2)) { + len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2); + if (len < 0) { result = -ECONNABORTED; goto accept_err; } /* Get the new node's NODEID */ make_sockaddr(&peeraddr, 0, &len); - if (addr_to_nodeid(&peeraddr,
&nodeid)) { - unsigned char *b=(unsigned char *)&peeraddr; - log_print("connect from non cluster node"); - print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE, - b, sizeof(struct sockaddr_storage)); + if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) { + switch (peeraddr.ss_family) { + case AF_INET: { + struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr; + + log_print("connect from non cluster IPv4 node %pI4", + &sin->sin_addr); + break; + } +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr; + + log_print("connect from non cluster IPv6 node %pI6c", + &sin6->sin6_addr); + break; + } +#endif + default: + log_print("invalid family from non cluster node"); + break; + } + sock_release(newsock); - mutex_unlock(&con->sock_mutex); return -1; } @@ -953,79 +1037,71 @@ static int tcp_accept_from_sock(struct connection *con) * the same time and the connections cross on the wire. * In this case we store the incoming one in "othercon" */ - newcon = nodeid2con(nodeid, GFP_NOFS); - if (!newcon) { - result = -ENOMEM; + idx = srcu_read_lock(&connections_srcu); + newcon = nodeid2con(nodeid, 0); + if (WARN_ON_ONCE(!newcon)) { + srcu_read_unlock(&connections_srcu, idx); + result = -ENOENT; goto accept_err; } - mutex_lock_nested(&newcon->sock_mutex, 1); + + sock_set_mark(newsock->sk, mark); + + down_write(&newcon->sock_lock); if (newcon->sock) { struct connection *othercon = newcon->othercon; if (!othercon) { - othercon = kmem_cache_zalloc(con_cache, GFP_NOFS); + othercon = kzalloc(sizeof(*othercon), GFP_NOFS); if (!othercon) { log_print("failed to allocate incoming socket"); - mutex_unlock(&newcon->sock_mutex); + up_write(&newcon->sock_lock); + srcu_read_unlock(&connections_srcu, idx); result = -ENOMEM; goto accept_err; } - othercon->nodeid = nodeid; - othercon->rx_action = receive_from_sock; - mutex_init(&othercon->sock_mutex); - INIT_WORK(&othercon->swork, process_send_sockets); - INIT_WORK(&othercon->rwork, process_recv_sockets); - set_bit(CF_IS_OTHERCON, &othercon->flags); - } - if (!othercon->sock) { + + dlm_con_init(othercon, nodeid); + lockdep_set_subclass(&othercon->sock_lock, 1); newcon->othercon = othercon; - othercon->sock = newsock; - newsock->sk->sk_user_data = othercon; - add_sock(newsock, othercon); - addcon = othercon; - } - else { - printk("Extra connection from node %d attempted\n", nodeid); - result = -EAGAIN; - mutex_unlock(&newcon->sock_mutex); - goto accept_err; + set_bit(CF_IS_OTHERCON, &othercon->flags); + } else { + /* close other sock con if we have something new */ + close_connection(othercon, false); } + + down_write(&othercon->sock_lock); + add_sock(newsock, othercon); + + /* check if we received something while adding */ + lock_sock(othercon->sock->sk); + lowcomms_queue_rwork(othercon); + release_sock(othercon->sock->sk); + up_write(&othercon->sock_lock); } else { - newsock->sk->sk_user_data = newcon; - newcon->rx_action = receive_from_sock; + /* accept copies the sk after we've saved the callbacks, so we + don't want to save them a second time or comm errors will + result in calling sk_error_report recursively.
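+	   the saved callbacks live in listen_sock and are put back by
+	   restore_callbacks() when the socket is released.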
*/ add_sock(newsock, newcon); - addcon = newcon; - } - mutex_unlock(&newcon->sock_mutex); + /* check if we received something while adding */ + lock_sock(newcon->sock->sk); + lowcomms_queue_rwork(newcon); + release_sock(newcon->sock->sk); + } + up_write(&newcon->sock_lock); + srcu_read_unlock(&connections_srcu, idx); - /* - * Add it to the active queue in case we got data - * between processing the accept adding the socket - * to the read_sockets list - */ - if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags)) - queue_work(recv_workqueue, &addcon->rwork); - mutex_unlock(&con->sock_mutex); - - return 0; + return DLM_IO_SUCCESS; accept_err: - mutex_unlock(&con->sock_mutex); - sock_release(newsock); + if (newsock) + sock_release(newsock); - if (result != -EAGAIN) - log_print("error accepting connection from node: %d", result); return result; } -static void free_entry(struct writequeue_entry *e) -{ - __free_page(e->page); - kfree(e); -} - /* * writequeue_entry_complete - try to delete and free write queue entry * @e: write queue entry to try to delete @@ -1037,727 +1113,803 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed) { e->offset += completed; e->len -= completed; + /* signal that page was halfway transmitted */ + e->dirty = true; - if (e->len == 0 && e->users == 0) { - list_del(&e->list); + if (e->len == 0 && e->users == 0) free_entry(e); - } } -/* Initiate an SCTP association. - This is a special case of send_to_sock() in that we don't yet have a - peeled-off socket for this association, so we use the listening socket - and add the primary IP address of the remote node. +/* + * sctp_bind_addrs - bind a SCTP socket to all our addresses */ -static void sctp_init_assoc(struct connection *con) -{ - struct sockaddr_storage rem_addr; - char outcmsg[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))]; - struct msghdr outmessage; - struct cmsghdr *cmsg; - struct sctp_sndrcvinfo *sinfo; - struct connection *base_con; - struct writequeue_entry *e; - int len, offset; - int ret; - int addrlen; - struct kvec iov[1]; - - mutex_lock(&con->sock_mutex); - if (test_and_set_bit(CF_INIT_PENDING, &con->flags)) - goto unlock; - - if (nodeid_to_addr(con->nodeid, NULL, (struct sockaddr *)&rem_addr, - con->try_new_addr)) { - log_print("no address for nodeid %d", con->nodeid); - goto unlock; - } - base_con = nodeid2con(0, 0); - BUG_ON(base_con == NULL); - - make_sockaddr(&rem_addr, dlm_config.ci_tcp_port, &addrlen); +static int sctp_bind_addrs(struct socket *sock, __be16 port) +{ + struct sockaddr_storage localaddr; + struct sockaddr_unsized *addr = (struct sockaddr_unsized *)&localaddr; + int i, addr_len, result = 0; - outmessage.msg_name = &rem_addr; - outmessage.msg_namelen = addrlen; - outmessage.msg_control = outcmsg; - outmessage.msg_controllen = sizeof(outcmsg); - outmessage.msg_flags = MSG_EOR; + for (i = 0; i < dlm_local_count; i++) { + memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr)); + make_sockaddr(&localaddr, port, &addr_len); - spin_lock(&con->writequeue_lock); + if (!i) + result = kernel_bind(sock, addr, addr_len); + else + result = sock_bind_add(sock->sk, addr, addr_len); - if (list_empty(&con->writequeue)) { - spin_unlock(&con->writequeue_lock); - log_print("writequeue empty for nodeid %d", con->nodeid); - goto unlock; + if (result < 0) { + log_print("Can't bind to %d addr number %d, %d.\n", + port, i + 1, result); + break; + } } + return result; +} - e = list_first_entry(&con->writequeue, struct writequeue_entry, list); - len = e->len; - offset = e->offset;
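+/* dlm_local_addr[] stores the local addresses by value, so init_local()
+ * below only copies them from dlm_our_addr(); nothing is allocated. */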
+/* Get local addresses */ +static void init_local(void) +{ + struct sockaddr_storage sas; + int i; - /* Send the first block off the write queue */ - iov[0].iov_base = page_address(e->page)+offset; - iov[0].iov_len = len; - spin_unlock(&con->writequeue_lock); + dlm_local_count = 0; + for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { + if (dlm_our_addr(&sas, i)) + break; - if (rem_addr.ss_family == AF_INET) { - struct sockaddr_in *sin = (struct sockaddr_in *)&rem_addr; - log_print("Trying to connect to %pI4", &sin->sin_addr.s_addr); - } else { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&rem_addr; - log_print("Trying to connect to %pI6", &sin6->sin6_addr); + memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas)); } +} - cmsg = CMSG_FIRSTHDR(&outmessage); - cmsg->cmsg_level = IPPROTO_SCTP; - cmsg->cmsg_type = SCTP_SNDRCV; - cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); - sinfo = CMSG_DATA(cmsg); - memset(sinfo, 0x00, sizeof(struct sctp_sndrcvinfo)); - sinfo->sinfo_ppid = cpu_to_le32(con->nodeid); - outmessage.msg_controllen = cmsg->cmsg_len; - sinfo->sinfo_flags |= SCTP_ADDR_OVER; +static struct writequeue_entry *new_writequeue_entry(struct connection *con) +{ + struct writequeue_entry *entry; - ret = kernel_sendmsg(base_con->sock, &outmessage, iov, 1, len); - if (ret < 0) { - log_print("Send first packet to node %d failed: %d", - con->nodeid, ret); + entry = dlm_allocate_writequeue(); + if (!entry) + return NULL; - /* Try again later */ - clear_bit(CF_CONNECT_PENDING, &con->flags); - clear_bit(CF_INIT_PENDING, &con->flags); - } - else { - spin_lock(&con->writequeue_lock); - writequeue_entry_complete(e, ret); - spin_unlock(&con->writequeue_lock); + entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + if (!entry->page) { + dlm_free_writequeue(entry); + return NULL; } -unlock: - mutex_unlock(&con->sock_mutex); + entry->offset = 0; + entry->len = 0; + entry->end = 0; + entry->dirty = false; + entry->con = con; + entry->users = 1; + kref_init(&entry->ref); + return entry; } -/* Connect a new socket to its peer */ -static void tcp_connect_to_sock(struct connection *con) +static struct writequeue_entry *new_wq_entry(struct connection *con, int len, + char **ppc, void (*cb)(void *data), + void *data) { - struct sockaddr_storage saddr, src_addr; - int addr_len; - struct socket *sock = NULL; - int one = 1; - int result; + struct writequeue_entry *e; - if (con->nodeid == 0) { - log_print("attempt to connect sock 0 foiled"); - return; + spin_lock_bh(&con->writequeue_lock); + if (!list_empty(&con->writequeue)) { + e = list_last_entry(&con->writequeue, struct writequeue_entry, list); + if (DLM_WQ_REMAIN_BYTES(e) >= len) { + kref_get(&e->ref); + + *ppc = page_address(e->page) + e->end; + if (cb) + cb(data); + + e->end += len; + e->users++; + goto out; + } } - mutex_lock(&con->sock_mutex); - if (con->retries++ > MAX_CONNECT_RETRIES) + e = new_writequeue_entry(con); + if (!e) goto out; - /* Some odd races can cause double-connects, ignore them */ - if (con->sock) - goto out; + kref_get(&e->ref); + *ppc = page_address(e->page); + e->end += len; + if (cb) + cb(data); - /* Create a socket to communicate with */ - result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, - IPPROTO_TCP, &sock); - if (result < 0) - goto out_err; + list_add_tail(&e->list, &con->writequeue); - memset(&saddr, 0, sizeof(saddr)); - result = nodeid_to_addr(con->nodeid, &saddr, NULL, false); - if (result < 0) { - log_print("no address for nodeid %d", con->nodeid); - goto out_err; - } +out: + 
spin_unlock_bh(&con->writequeue_lock); + return e; +} - sock->sk->sk_user_data = con; - con->rx_action = receive_from_sock; - con->connect_action = tcp_connect_to_sock; - add_sock(sock, con); +static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len, + char **ppc, void (*cb)(void *data), + void *data) +{ + struct writequeue_entry *e; + struct dlm_msg *msg; - /* Bind to our cluster-known address connecting to avoid - routing problems */ - memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr)); - make_sockaddr(&src_addr, 0, &addr_len); - result = sock->ops->bind(sock, (struct sockaddr *) &src_addr, - addr_len); - if (result < 0) { - log_print("could not bind for connect: %d", result); - /* This *may* not indicate a critical error */ + msg = dlm_allocate_msg(); + if (!msg) + return NULL; + + kref_init(&msg->ref); + + e = new_wq_entry(con, len, ppc, cb, data); + if (!e) { + dlm_free_msg(msg); + return NULL; } - make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len); + msg->retransmit = false; + msg->orig_msg = NULL; + msg->ppc = *ppc; + msg->len = len; + msg->entry = e; - log_print("connecting to %d", con->nodeid); + return msg; +} - /* Turn off Nagle's algorithm */ - kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, - sizeof(one)); - - result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len, - O_NONBLOCK); - if (result == -EINPROGRESS) - result = 0; - if (result == 0) - goto out; +/* avoid false positive for connections_srcu, unlock happens in + * dlm_lowcomms_commit_msg which must be called on success + */ +#ifndef __CHECKER__ +struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc, + void (*cb)(void *data), void *data) +{ + struct connection *con; + struct dlm_msg *msg; + int idx; + + if (len > DLM_MAX_SOCKET_BUFSIZE || + len < sizeof(struct dlm_header)) { + BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE); + log_print("failed to allocate a buffer of size %d", len); + WARN_ON_ONCE(1); + return NULL; + } -out_err: - if (con->sock) { - sock_release(con->sock); - con->sock = NULL; - } else if (sock) { - sock_release(sock); + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, 0); + if (WARN_ON_ONCE(!con)) { + srcu_read_unlock(&connections_srcu, idx); + return NULL; } - /* - * Some errors are fatal and this list might need adjusting. For other - * errors we try again until the max number of retries is reached.
- */ - if (result != -EHOSTUNREACH && - result != -ENETUNREACH && - result != -ENETDOWN && - result != -EINVAL && - result != -EPROTONOSUPPORT) { - log_print("connect %d try %d error %d", con->nodeid, - con->retries, result); - mutex_unlock(&con->sock_mutex); - msleep(1000); - lowcomms_connect_sock(con); - return; + + msg = dlm_lowcomms_new_msg_con(con, len, ppc, cb, data); + if (!msg) { + srcu_read_unlock(&connections_srcu, idx); + return NULL; } + + /* for dlm_lowcomms_commit_msg() */ + kref_get(&msg->ref); + /* we assume if successful commit must called */ + msg->idx = idx; + return msg; +} +#endif + +static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg) +{ + struct writequeue_entry *e = msg->entry; + struct connection *con = e->con; + int users; + + spin_lock_bh(&con->writequeue_lock); + kref_get(&msg->ref); + list_add(&msg->list, &e->msgs); + + users = --e->users; + if (users) + goto out; + + e->len = DLM_WQ_LENGTH_BYTES(e); + + lowcomms_queue_swork(con); + out: - mutex_unlock(&con->sock_mutex); + spin_unlock_bh(&con->writequeue_lock); return; } -static struct socket *tcp_create_listen_sock(struct connection *con, - struct sockaddr_storage *saddr) +/* avoid false positive for nodes_srcu, lock was happen in + * dlm_lowcomms_new_msg + */ +#ifndef __CHECKER__ +void dlm_lowcomms_commit_msg(struct dlm_msg *msg) { - struct socket *sock = NULL; - int result = 0; - int one = 1; - int addr_len; - - if (dlm_local_addr[0]->ss_family == AF_INET) - addr_len = sizeof(struct sockaddr_in); - else - addr_len = sizeof(struct sockaddr_in6); + _dlm_lowcomms_commit_msg(msg); + srcu_read_unlock(&connections_srcu, msg->idx); + /* because dlm_lowcomms_new_msg() */ + kref_put(&msg->ref, dlm_msg_release); +} +#endif - /* Create a socket to communicate with */ - result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_STREAM, - IPPROTO_TCP, &sock); - if (result < 0) { - log_print("Can't create listening comms socket"); - goto create_out; - } +void dlm_lowcomms_put_msg(struct dlm_msg *msg) +{ + kref_put(&msg->ref, dlm_msg_release); +} - /* Turn off Nagle's algorithm */ - kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one, - sizeof(one)); +/* does not held connections_srcu, usage lowcomms_error_report only */ +int dlm_lowcomms_resend_msg(struct dlm_msg *msg) +{ + struct dlm_msg *msg_resend; + char *ppc; - result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, - (char *)&one, sizeof(one)); + if (msg->retransmit) + return 1; - if (result < 0) { - log_print("Failed to set SO_REUSEADDR on socket: %d", result); - } - con->rx_action = tcp_accept_from_sock; - con->connect_action = tcp_connect_to_sock; + msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len, &ppc, + NULL, NULL); + if (!msg_resend) + return -ENOMEM; - /* Bind to our port */ - make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len); - result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len); - if (result < 0) { - log_print("Can't bind to port %d", dlm_config.ci_tcp_port); - sock_release(sock); - sock = NULL; - con->sock = NULL; - goto create_out; - } - result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&one, sizeof(one)); - if (result < 0) { - log_print("Set keepalive failed: %d", result); - } + msg->retransmit = true; + kref_get(&msg->ref); + msg_resend->orig_msg = msg; - result = sock->ops->listen(sock, 5); - if (result < 0) { - log_print("Can't listen on port %d", dlm_config.ci_tcp_port); - sock_release(sock); - sock = NULL; - goto create_out; - } + memcpy(ppc, msg->ppc, msg->len); + 
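+	/* At this point the clone carries a copy of the original payload and
+	 * points back at the original via msg_resend->orig_msg; committing it
+	 * below queues it like any fresh message. For reference, the
+	 * caller-visible lifecycle of a message looks roughly like this
+	 * (hypothetical caller, error handling omitted):
+	 *
+	 *	char *ppc;
+	 *	struct dlm_msg *msg;
+	 *
+	 *	msg = dlm_lowcomms_new_msg(nodeid, len, &ppc, NULL, NULL);
+	 *	memcpy(ppc, payload, len);	// fill the reserved space
+	 *	dlm_lowcomms_commit_msg(msg);	// queue it, drops the srcu idx
+	 *	...
+	 *	dlm_lowcomms_put_msg(msg);	// drop the caller's reference
+	 */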
_dlm_lowcomms_commit_msg(msg_resend); + dlm_lowcomms_put_msg(msg_resend); -create_out: - return sock; + return 0; } -/* Get local addresses */ -static void init_local(void) +/* Send a message */ +static int send_to_sock(struct connection *con) { - struct sockaddr_storage sas, *addr; - int i; + struct writequeue_entry *e; + struct bio_vec bvec; + struct msghdr msg = { + .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL, + }; + int len, offset, ret; - dlm_local_count = 0; - for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) { - if (dlm_our_addr(&sas, i)) - break; + spin_lock_bh(&con->writequeue_lock); + e = con_next_wq(con); + if (!e) { + clear_bit(CF_SEND_PENDING, &con->flags); + spin_unlock_bh(&con->writequeue_lock); + return DLM_IO_END; + } - addr = kmalloc(sizeof(*addr), GFP_NOFS); - if (!addr) - break; - memcpy(addr, &sas, sizeof(*addr)); - dlm_local_addr[dlm_local_count++] = addr; + len = e->len; + offset = e->offset; + WARN_ON_ONCE(len == 0 && e->users == 0); + spin_unlock_bh(&con->writequeue_lock); + + bvec_set_page(&bvec, e->page, len, offset); + iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); + ret = sock_sendmsg(con->sock, &msg); + trace_dlm_send(con->nodeid, ret); + if (ret == -EAGAIN || ret == 0) { + lock_sock(con->sock->sk); + spin_lock_bh(&con->writequeue_lock); + if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && + !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { + /* Notify TCP that we're limited by the + * application window size. + */ + set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags); + con->sock->sk->sk_write_pending++; + + clear_bit(CF_SEND_PENDING, &con->flags); + spin_unlock_bh(&con->writequeue_lock); + release_sock(con->sock->sk); + + /* wait for write_space() event */ + return DLM_IO_END; + } + spin_unlock_bh(&con->writequeue_lock); + release_sock(con->sock->sk); + + return DLM_IO_RESCHED; + } else if (ret < 0) { + return ret; } + + spin_lock_bh(&con->writequeue_lock); + writequeue_entry_complete(e, ret); + spin_unlock_bh(&con->writequeue_lock); + + return DLM_IO_SUCCESS; } -/* Bind to an IP address. 
SCTP allows multiple address so it can do - multi-homing */ -static int add_sctp_bind_addr(struct connection *sctp_con, - struct sockaddr_storage *addr, - int addr_len, int num) +static void clean_one_writequeue(struct connection *con) { - int result = 0; + struct writequeue_entry *e, *safe; - if (num == 1) - result = kernel_bind(sctp_con->sock, - (struct sockaddr *) addr, - addr_len); - else - result = kernel_setsockopt(sctp_con->sock, SOL_SCTP, - SCTP_SOCKOPT_BINDX_ADD, - (char *)addr, addr_len); + spin_lock_bh(&con->writequeue_lock); + list_for_each_entry_safe(e, safe, &con->writequeue, list) { + free_entry(e); + } + spin_unlock_bh(&con->writequeue_lock); +} - if (result < 0) - log_print("Can't bind to port %d addr number %d", - dlm_config.ci_tcp_port, num); +static void connection_release(struct rcu_head *rcu) +{ + struct connection *con = container_of(rcu, struct connection, rcu); - return result; + WARN_ON_ONCE(!list_empty(&con->writequeue)); + WARN_ON_ONCE(con->sock); + kfree(con); } -/* Initialise SCTP socket and bind to all interfaces */ -static int sctp_listen_for_all(void) +/* Called from recovery when it knows that a node has + left the cluster */ +int dlm_lowcomms_close(int nodeid) { - struct socket *sock = NULL; - struct sockaddr_storage localaddr; - struct sctp_event_subscribe subscribe; - int result = -EINVAL, num = 1, i, addr_len; - struct connection *con = nodeid2con(0, GFP_NOFS); - int bufsize = NEEDED_RMEM; - int one = 1; - - if (!con) - return -ENOMEM; + struct connection *con; + int idx; - log_print("Using SCTP for communications"); + log_print("closing connection to node %d", nodeid); - result = sock_create_kern(dlm_local_addr[0]->ss_family, SOCK_SEQPACKET, - IPPROTO_SCTP, &sock); - if (result < 0) { - log_print("Can't create comms socket, check SCTP is loaded"); - goto out; + idx = srcu_read_lock(&connections_srcu); + con = nodeid2con(nodeid, 0); + if (WARN_ON_ONCE(!con)) { + srcu_read_unlock(&connections_srcu, idx); + return -ENOENT; } - /* Listen for events */ - memset(&subscribe, 0, sizeof(subscribe)); - subscribe.sctp_data_io_event = 1; - subscribe.sctp_association_event = 1; - subscribe.sctp_send_failure_event = 1; - subscribe.sctp_shutdown_event = 1; - subscribe.sctp_partial_delivery_event = 1; + stop_connection_io(con); + log_print("io handling for node: %d stopped", nodeid); + close_connection(con, true); - result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE, - (char *)&bufsize, sizeof(bufsize)); - if (result) - log_print("Error increasing buffer space on socket %d", result); + spin_lock(&connections_lock); + hlist_del_rcu(&con->list); + spin_unlock(&connections_lock); - result = kernel_setsockopt(sock, SOL_SCTP, SCTP_EVENTS, - (char *)&subscribe, sizeof(subscribe)); - if (result < 0) { - log_print("Failed to set SCTP_EVENTS on socket: result=%d", - result); - goto create_delsock; + clean_one_writequeue(con); + call_srcu(&connections_srcu, &con->rcu, connection_release); + if (con->othercon) { + clean_one_writequeue(con->othercon); + call_srcu(&connections_srcu, &con->othercon->rcu, connection_release); } + srcu_read_unlock(&connections_srcu, idx); - result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one, - sizeof(one)); - if (result < 0) - log_print("Could not set SCTP NODELAY error %d\n", result); + /* for debugging we print when we are done to compare with other + * messages in between. 
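+	 * In short, the teardown order above is: stop_connection_io() parks
+	 * the rx/tx workers, close_connection() releases the socket,
+	 * hlist_del_rcu() unpublishes the connection from the hash,
+	 * clean_one_writequeue() drops queued but unsent pages, and
+	 * call_srcu() defers the actual kfree() in connection_release()
+	 * until the last srcu reader is done.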
This function need to be correctly synchronized + * with io handling + */ + log_print("closing connection to node %d done", nodeid); - /* Init con struct */ - sock->sk->sk_user_data = con; - con->sock = sock; - con->sock->sk->sk_data_ready = lowcomms_data_ready; - con->rx_action = receive_from_sock; - con->connect_action = sctp_init_assoc; + return 0; +} - /* Bind to all interfaces. */ - for (i = 0; i < dlm_local_count; i++) { - memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr)); - make_sockaddr(&localaddr, dlm_config.ci_tcp_port, &addr_len); +/* Receive worker function */ +static void process_recv_sockets(struct work_struct *work) +{ + struct connection *con = container_of(work, struct connection, rwork); + int ret, buflen; - result = add_sctp_bind_addr(con, &localaddr, addr_len, num); - if (result) - goto create_delsock; - ++num; + down_read(&con->sock_lock); + if (!con->sock) { + up_read(&con->sock_lock); + return; } - result = sock->ops->listen(sock, 5); - if (result < 0) { - log_print("Can't set socket listening"); - goto create_delsock; - } + buflen = READ_ONCE(dlm_config.ci_buffer_size); + do { + ret = receive_from_sock(con, buflen); + } while (ret == DLM_IO_SUCCESS); + up_read(&con->sock_lock); - return 0; + switch (ret) { + case DLM_IO_END: + /* CF_RECV_PENDING cleared */ + break; + case DLM_IO_EOF: + close_connection(con, false); + wake_up(&con->shutdown_wait); + /* CF_RECV_PENDING cleared */ + break; + case DLM_IO_FLUSH: + /* we can't flush the process_workqueue here because a + * WQ_MEM_RECLAIM workequeue can occurr a deadlock for a non + * WQ_MEM_RECLAIM workqueue such as process_workqueue. Instead + * we have a waitqueue to wait until all messages are + * processed. + * + * This handling is only necessary to backoff the sender and + * not queue all messages from the socket layer into DLM + * processqueue. When DLM is capable to parse multiple messages + * on an e.g. per socket basis this handling can might be + * removed. Especially in a message burst we are too slow to + * process messages and the queue will fill up memory. 
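+		 *
+		 * A sketch of the handshake this relies on, assuming the
+		 * receive path bumps processqueue_count before queueing a
+		 * message and the processing side decrements it
+		 * (process_work is a hypothetical name here):
+		 *
+		 *	// receive path, per queued message
+		 *	atomic_inc(&processqueue_count);
+		 *	queue_work(process_workqueue, &process_work);
+		 *
+		 *	// processing side, per handled message
+		 *	if (atomic_dec_and_test(&processqueue_count))
+		 *		wake_up(&processqueue_wq);
+		 *
+		 * so the wait_event() below sleeps only until the backlog has
+		 * drained to zero.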
+ */ + wait_event(processqueue_wq, !atomic_read(&processqueue_count)); + fallthrough; + case DLM_IO_RESCHED: + cond_resched(); + queue_work(io_workqueue, &con->rwork); + /* CF_RECV_PENDING not cleared */ + break; + default: + if (ret < 0) { + if (test_bit(CF_IS_OTHERCON, &con->flags)) { + close_connection(con, false); + } else { + spin_lock_bh(&con->writequeue_lock); + lowcomms_queue_swork(con); + spin_unlock_bh(&con->writequeue_lock); + } -create_delsock: - sock_release(sock); - con->sock = NULL; -out: - return result; + /* CF_RECV_PENDING cleared for othercon + * we trigger send queue if not already done + * and process_send_sockets will handle it + */ + break; + } + + WARN_ON_ONCE(1); + break; + } } -static int tcp_listen_for_all(void) +static void process_listen_recv_socket(struct work_struct *work) { - struct socket *sock = NULL; - struct connection *con = nodeid2con(0, GFP_NOFS); - int result = -EINVAL; + int ret; - if (!con) - return -ENOMEM; + if (WARN_ON_ONCE(!listen_con.sock)) + return; - /* We don't support multi-homed hosts */ - if (dlm_local_addr[1] != NULL) { - log_print("TCP protocol can't handle multi-homed hosts, " - "try SCTP"); - return -EINVAL; + do { + ret = accept_from_sock(); + } while (ret == DLM_IO_SUCCESS); + + if (ret < 0) + log_print("critical error accepting connection: %d", ret); +} + +static int dlm_connect(struct connection *con) +{ + struct sockaddr_storage addr; + int result, addr_len; + struct socket *sock; + unsigned int mark; + + memset(&addr, 0, sizeof(addr)); + result = nodeid_to_addr(con->nodeid, &addr, NULL, + dlm_proto_ops->try_new_addr, &mark); + if (result < 0) { + log_print("no address for nodeid %d", con->nodeid); + return result; } - log_print("Using TCP for communications"); + /* Create a socket to communicate with */ + result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, + SOCK_STREAM, dlm_proto_ops->proto, &sock); + if (result < 0) + return result; + + sock_set_mark(sock->sk, mark); + dlm_proto_ops->sockopts(sock); - sock = tcp_create_listen_sock(con, dlm_local_addr[0]); - if (sock) { - add_sock(sock, con); - result = 0; + result = dlm_proto_ops->bind(sock); + if (result < 0) { + sock_release(sock); + return result; } - else { - result = -EADDRINUSE; + + add_sock(sock, con); + + log_print_ratelimited("connecting to %d", con->nodeid); + make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len); + result = kernel_connect(sock, (struct sockaddr_unsized *)&addr, addr_len, 0); + switch (result) { + case -EINPROGRESS: + /* not an error */ + fallthrough; + case 0: + break; + default: + if (result < 0) + dlm_close_sock(&con->sock); + + break; } return result; } - - -static struct writequeue_entry *new_writequeue_entry(struct connection *con, - gfp_t allocation) +/* Send worker function */ +static void process_send_sockets(struct work_struct *work) { - struct writequeue_entry *entry; - - entry = kmalloc(sizeof(struct writequeue_entry), allocation); - if (!entry) - return NULL; + struct connection *con = container_of(work, struct connection, swork); + int ret; - entry->page = alloc_page(allocation); - if (!entry->page) { - kfree(entry); - return NULL; + WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags)); + + down_read(&con->sock_lock); + if (!con->sock) { + up_read(&con->sock_lock); + down_write(&con->sock_lock); + if (!con->sock) { + ret = dlm_connect(con); + switch (ret) { + case 0: + break; + default: + /* CF_SEND_PENDING not cleared */ + up_write(&con->sock_lock); + log_print("connect to node %d try %d error %d", + con->nodeid, 
con->retries++, ret); + msleep(1000); + /* For now we try forever to reconnect. In + * future we should send a event to cluster + * manager to fence itself after certain amount + * of retries. + */ + queue_work(io_workqueue, &con->swork); + return; + } + } + downgrade_write(&con->sock_lock); } - entry->offset = 0; - entry->len = 0; - entry->end = 0; - entry->users = 0; - entry->con = con; + do { + ret = send_to_sock(con); + } while (ret == DLM_IO_SUCCESS); + up_read(&con->sock_lock); - return entry; + switch (ret) { + case DLM_IO_END: + /* CF_SEND_PENDING cleared */ + break; + case DLM_IO_RESCHED: + /* CF_SEND_PENDING not cleared */ + cond_resched(); + queue_work(io_workqueue, &con->swork); + break; + default: + if (ret < 0) { + close_connection(con, false); + + /* CF_SEND_PENDING cleared */ + spin_lock_bh(&con->writequeue_lock); + lowcomms_queue_swork(con); + spin_unlock_bh(&con->writequeue_lock); + break; + } + + WARN_ON_ONCE(1); + break; + } } -void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) +static void work_stop(void) { - struct connection *con; - struct writequeue_entry *e; - int offset = 0; - - con = nodeid2con(nodeid, allocation); - if (!con) - return NULL; + if (io_workqueue) { + destroy_workqueue(io_workqueue); + io_workqueue = NULL; + } - spin_lock(&con->writequeue_lock); - e = list_entry(con->writequeue.prev, struct writequeue_entry, list); - if ((&e->list == &con->writequeue) || - (PAGE_CACHE_SIZE - e->end < len)) { - e = NULL; - } else { - offset = e->end; - e->end += len; - e->users++; + if (process_workqueue) { + destroy_workqueue(process_workqueue); + process_workqueue = NULL; } - spin_unlock(&con->writequeue_lock); +} - if (e) { - got_one: - *ppc = page_address(e->page) + offset; - return e; +static int work_start(void) +{ + io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM | + WQ_UNBOUND, 0); + if (!io_workqueue) { + log_print("can't start dlm_io"); + return -ENOMEM; } - e = new_writequeue_entry(con, allocation); - if (e) { - spin_lock(&con->writequeue_lock); - offset = e->end; - e->end += len; - e->users++; - list_add_tail(&e->list, &con->writequeue); - spin_unlock(&con->writequeue_lock); - goto got_one; + process_workqueue = alloc_workqueue("dlm_process", WQ_HIGHPRI | WQ_BH | WQ_PERCPU, 0); + if (!process_workqueue) { + log_print("can't start dlm_process"); + destroy_workqueue(io_workqueue); + io_workqueue = NULL; + return -ENOMEM; } - return NULL; + + return 0; } -void dlm_lowcomms_commit_buffer(void *mh) +void dlm_lowcomms_shutdown(void) { - struct writequeue_entry *e = (struct writequeue_entry *)mh; - struct connection *con = e->con; - int users; + struct connection *con; + int i, idx; - spin_lock(&con->writequeue_lock); - users = --e->users; - if (users) - goto out; - e->len = e->end - e->offset; - spin_unlock(&con->writequeue_lock); + /* stop lowcomms_listen_data_ready calls */ + lock_sock(listen_con.sock->sk); + listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready; + release_sock(listen_con.sock->sk); - if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) { - queue_work(send_workqueue, &con->swork); + cancel_work_sync(&listen_con.rwork); + dlm_close_sock(&listen_con.sock); + + idx = srcu_read_lock(&connections_srcu); + for (i = 0; i < CONN_HASH_SIZE; i++) { + hlist_for_each_entry_rcu(con, &connection_hash[i], list) { + shutdown_connection(con, true); + stop_connection_io(con); + flush_workqueue(process_workqueue); + close_connection(con, true); + + clean_one_writequeue(con); + if (con->othercon) + 
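+				/* the othercon twin (the second socket that
+				 * can appear when both peers connect at the
+				 * same time) has its own writequeue and is
+				 * flushed separately
+				 */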
clean_one_writequeue(con->othercon); + allow_connection_io(con); + } } - return; + srcu_read_unlock(&connections_srcu, idx); +} -out: - spin_unlock(&con->writequeue_lock); - return; +void dlm_lowcomms_stop(void) +{ + work_stop(); + dlm_proto_ops = NULL; } -/* Send a message */ -static void send_to_sock(struct connection *con) +static int dlm_listen_for_all(void) { - int ret = 0; - const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL; - struct writequeue_entry *e; - int len, offset; - int count = 0; + struct socket *sock; + int result; - mutex_lock(&con->sock_mutex); - if (con->sock == NULL) - goto out_connect; + log_print("Using %s for communications", + dlm_proto_ops->name); - spin_lock(&con->writequeue_lock); - for (;;) { - e = list_entry(con->writequeue.next, struct writequeue_entry, - list); - if ((struct list_head *) e == &con->writequeue) - break; + result = dlm_proto_ops->listen_validate(); + if (result < 0) + return result; - len = e->len; - offset = e->offset; - BUG_ON(len == 0 && e->users == 0); - spin_unlock(&con->writequeue_lock); - - ret = 0; - if (len) { - ret = kernel_sendpage(con->sock, e->page, offset, len, - msg_flags); - if (ret == -EAGAIN || ret == 0) { - if (ret == -EAGAIN && - test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && - !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { - /* Notify TCP that we're limited by the - * application window size. - */ - set_bit(SOCK_NOSPACE, &con->sock->flags); - con->sock->sk->sk_write_pending++; - } - cond_resched(); - goto out; - } else if (ret < 0) - goto send_error; - } + result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family, + SOCK_STREAM, dlm_proto_ops->proto, &sock); + if (result < 0) { + log_print("Can't create comms socket: %d", result); + return result; + } - /* Don't starve people filling buffers */ - if (++count >= MAX_SEND_MSG_COUNT) { - cond_resched(); - count = 0; - } + sock_set_mark(sock->sk, dlm_config.ci_mark); + dlm_proto_ops->listen_sockopts(sock); - spin_lock(&con->writequeue_lock); - writequeue_entry_complete(e, ret); - } - spin_unlock(&con->writequeue_lock); -out: - mutex_unlock(&con->sock_mutex); - return; + result = dlm_proto_ops->listen_bind(sock); + if (result < 0) + goto out; -send_error: - mutex_unlock(&con->sock_mutex); - close_connection(con, false); - lowcomms_connect_sock(con); - return; + lock_sock(sock->sk); + listen_sock.sk_data_ready = sock->sk->sk_data_ready; + listen_sock.sk_write_space = sock->sk->sk_write_space; + listen_sock.sk_error_report = sock->sk->sk_error_report; + listen_sock.sk_state_change = sock->sk->sk_state_change; -out_connect: - mutex_unlock(&con->sock_mutex); - if (!test_bit(CF_INIT_PENDING, &con->flags)) - lowcomms_connect_sock(con); -} + listen_con.sock = sock; -static void clean_one_writequeue(struct connection *con) -{ - struct writequeue_entry *e, *safe; + sock->sk->sk_allocation = GFP_NOFS; + sock->sk->sk_use_task_frag = false; + sock->sk->sk_data_ready = lowcomms_listen_data_ready; + release_sock(sock->sk); - spin_lock(&con->writequeue_lock); - list_for_each_entry_safe(e, safe, &con->writequeue, list) { - list_del(&e->list); - free_entry(e); + result = sock->ops->listen(sock, 128); + if (result < 0) { + dlm_close_sock(&listen_con.sock); + return result; } - spin_unlock(&con->writequeue_lock); + + return 0; + +out: + sock_release(sock); + return result; } -/* Called from recovery when it knows that a node has - left the cluster */ -int dlm_lowcomms_close(int nodeid) +static int dlm_tcp_bind(struct socket *sock) { - struct connection *con; - struct dlm_node_addr 
*na; + struct sockaddr_storage src_addr; + int result, addr_len; - log_print("closing connection to node %d", nodeid); - con = nodeid2con(nodeid, 0); - if (con) { - clear_bit(CF_CONNECT_PENDING, &con->flags); - clear_bit(CF_WRITE_PENDING, &con->flags); - set_bit(CF_CLOSE, &con->flags); - if (cancel_work_sync(&con->swork)) - log_print("canceled swork for node %d", nodeid); - if (cancel_work_sync(&con->rwork)) - log_print("canceled rwork for node %d", nodeid); - clean_one_writequeue(con); - close_connection(con, true); - } - - spin_lock(&dlm_node_addrs_spin); - na = find_node_addr(nodeid); - if (na) { - list_del(&na->list); - while (na->addr_count--) - kfree(na->addr[na->addr_count]); - kfree(na); - } - spin_unlock(&dlm_node_addrs_spin); + /* Bind to our cluster-known address connecting to avoid + * routing problems. + */ + memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr)); + make_sockaddr(&src_addr, 0, &addr_len); + + result = kernel_bind(sock, (struct sockaddr_unsized *)&src_addr, + addr_len); + if (result < 0) { + /* This *may* not indicate a critical error */ + log_print("could not bind for connect: %d", result); + } return 0; } -/* Receive workqueue function */ -static void process_recv_sockets(struct work_struct *work) +static int dlm_tcp_listen_validate(void) { - struct connection *con = container_of(work, struct connection, rwork); - int err; + /* We don't support multi-homed hosts */ + if (dlm_local_count > 1) { + log_print("Detect multi-homed hosts but use only the first IP address."); + log_print("Try SCTP, if you want to enable multi-link."); + } - clear_bit(CF_READ_PENDING, &con->flags); - do { - err = con->rx_action(con); - } while (!err); + return 0; } -/* Send workqueue function */ -static void process_send_sockets(struct work_struct *work) +static void dlm_tcp_sockopts(struct socket *sock) { - struct connection *con = container_of(work, struct connection, swork); - - if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) { - con->connect_action(con); - set_bit(CF_WRITE_PENDING, &con->flags); - } - if (test_and_clear_bit(CF_WRITE_PENDING, &con->flags)) - send_to_sock(con); + /* Turn off Nagle's algorithm */ + tcp_sock_set_nodelay(sock->sk); } - -/* Discard all entries on the write queues */ -static void clean_writequeues(void) +static void dlm_tcp_listen_sockopts(struct socket *sock) { - foreach_conn(clean_one_writequeue); + dlm_tcp_sockopts(sock); + sock_set_reuseaddr(sock->sk); } -static void work_stop(void) +static int dlm_tcp_listen_bind(struct socket *sock) { - destroy_workqueue(recv_workqueue); - destroy_workqueue(send_workqueue); + int addr_len; + + /* Bind to our port */ + make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len); + return kernel_bind(sock, (struct sockaddr_unsized *)&dlm_local_addr[0], + addr_len); } -static int work_start(void) +static const struct dlm_proto_ops dlm_tcp_ops = { + .name = "TCP", + .proto = IPPROTO_TCP, + .how = SHUT_WR, + .sockopts = dlm_tcp_sockopts, + .bind = dlm_tcp_bind, + .listen_validate = dlm_tcp_listen_validate, + .listen_sockopts = dlm_tcp_listen_sockopts, + .listen_bind = dlm_tcp_listen_bind, +}; + +static int dlm_sctp_bind(struct socket *sock) { - recv_workqueue = alloc_workqueue("dlm_recv", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (!recv_workqueue) { - log_print("can't start dlm_recv"); - return -ENOMEM; - } + return sctp_bind_addrs(sock, 0); +} - send_workqueue = alloc_workqueue("dlm_send", - WQ_UNBOUND | WQ_MEM_RECLAIM, 1); - if (!send_workqueue) { - log_print("can't start dlm_send"); - 
destroy_workqueue(recv_workqueue); - return -ENOMEM; +static int dlm_sctp_listen_validate(void) +{ + if (!IS_ENABLED(CONFIG_IP_SCTP)) { + log_print("SCTP is not enabled by this kernel"); + return -EOPNOTSUPP; } + request_module("sctp"); return 0; } -static void stop_conn(struct connection *con) +static int dlm_sctp_bind_listen(struct socket *sock) { - con->flags |= 0x0F; - if (con->sock && con->sock->sk) - con->sock->sk->sk_user_data = NULL; + return sctp_bind_addrs(sock, dlm_config.ci_tcp_port); } -static void free_conn(struct connection *con) +static void dlm_sctp_sockopts(struct socket *sock) { - close_connection(con, true); - if (con->othercon) - kmem_cache_free(con_cache, con->othercon); - hlist_del(&con->list); - kmem_cache_free(con_cache, con); + /* Turn off Nagle's algorithm */ + sctp_sock_set_nodelay(sock->sk); + sock_set_rcvbuf(sock->sk, NEEDED_RMEM); } -void dlm_lowcomms_stop(void) -{ - /* Set all the flags to prevent any - socket activity. - */ - mutex_lock(&connections_lock); - dlm_allow_conn = 0; - foreach_conn(stop_conn); - mutex_unlock(&connections_lock); - - work_stop(); - - mutex_lock(&connections_lock); - clean_writequeues(); - - foreach_conn(free_conn); - - mutex_unlock(&connections_lock); - kmem_cache_destroy(con_cache); -} +static const struct dlm_proto_ops dlm_sctp_ops = { + .name = "SCTP", + .proto = IPPROTO_SCTP, + .how = SHUT_RDWR, + .try_new_addr = true, + .sockopts = dlm_sctp_sockopts, + .bind = dlm_sctp_bind, + .listen_validate = dlm_sctp_listen_validate, + .listen_sockopts = dlm_sctp_sockopts, + .listen_bind = dlm_sctp_bind_listen, +}; int dlm_lowcomms_start(void) { - int error = -EINVAL; - struct connection *con; - int i; - - for (i = 0; i < CONN_HASH_SIZE; i++) - INIT_HLIST_HEAD(&connection_hash[i]); + int error; init_local(); if (!dlm_local_count) { @@ -1766,52 +1918,66 @@ int dlm_lowcomms_start(void) goto fail; } - error = -ENOMEM; - con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection), - __alignof__(struct connection), 0, - NULL); - if (!con_cache) - goto fail; - error = work_start(); if (error) - goto fail_destroy; - - dlm_allow_conn = 1; + goto fail; /* Start listening */ - if (dlm_config.ci_protocol == 0) - error = tcp_listen_for_all(); - else - error = sctp_listen_for_all(); + switch (dlm_config.ci_protocol) { + case DLM_PROTO_TCP: + dlm_proto_ops = &dlm_tcp_ops; + break; + case DLM_PROTO_SCTP: + dlm_proto_ops = &dlm_sctp_ops; + break; + default: + log_print("Invalid protocol identifier %d set", + dlm_config.ci_protocol); + error = -EINVAL; + goto fail_proto_ops; + } + + error = dlm_listen_for_all(); if (error) - goto fail_unlisten; + goto fail_listen; return 0; -fail_unlisten: - dlm_allow_conn = 0; - con = nodeid2con(0,0); - if (con) { - close_connection(con, false); - kmem_cache_free(con_cache, con); - } -fail_destroy: - kmem_cache_destroy(con_cache); +fail_listen: + dlm_proto_ops = NULL; +fail_proto_ops: + work_stop(); fail: return error; } +void dlm_lowcomms_init(void) +{ + int i; + + for (i = 0; i < CONN_HASH_SIZE; i++) + INIT_HLIST_HEAD(&connection_hash[i]); + + INIT_WORK(&listen_con.rwork, process_listen_recv_socket); +} + void dlm_lowcomms_exit(void) { - struct dlm_node_addr *na, *safe; + struct connection *con; + int i, idx; - spin_lock(&dlm_node_addrs_spin); - list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) { - list_del(&na->list); - while (na->addr_count--) - kfree(na->addr[na->addr_count]); - kfree(na); + idx = srcu_read_lock(&connections_srcu); + for (i = 0; i < CONN_HASH_SIZE; i++) { + 
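+		/* final module teardown: unpublish every remaining connection
+		 * from the hash and defer the frees to SRCU, mirroring what
+		 * dlm_lowcomms_close() does above for a single node
+		 */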
hlist_for_each_entry_rcu(con, &connection_hash[i], list) { + spin_lock(&connections_lock); + hlist_del_rcu(&con->list); + spin_unlock(&connections_lock); + + if (con->othercon) + call_srcu(&connections_srcu, &con->othercon->rcu, + connection_release); + call_srcu(&connections_srcu, &con->rcu, connection_release); + } } - spin_unlock(&dlm_node_addrs_spin); + srcu_read_unlock(&connections_srcu, idx); } diff --git a/fs/dlm/lowcomms.h b/fs/dlm/lowcomms.h index 67462e54fc2f..fd0df604eb93 100644 --- a/fs/dlm/lowcomms.h +++ b/fs/dlm/lowcomms.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -14,14 +12,44 @@ #ifndef __LOWCOMMS_DOT_H__ #define __LOWCOMMS_DOT_H__ +#include "dlm_internal.h" + +#define DLM_MIDCOMMS_OPT_LEN sizeof(struct dlm_opts) +#define DLM_MAX_APP_BUFSIZE (DLM_MAX_SOCKET_BUFSIZE - \ + DLM_MIDCOMMS_OPT_LEN) + +#define CONN_HASH_SIZE 32 + +/* This is deliberately very simple because most clusters have simple + * sequential nodeids, so we should be able to go straight to a connection + * struct in the array + */ +static inline int nodeid_hash(int nodeid) +{ + return nodeid & (CONN_HASH_SIZE-1); +} + +/* check if dlm is running */ +bool dlm_lowcomms_is_running(void); + int dlm_lowcomms_start(void); +void dlm_lowcomms_shutdown(void); +void dlm_lowcomms_shutdown_node(int nodeid, bool force); void dlm_lowcomms_stop(void); +void dlm_lowcomms_init(void); void dlm_lowcomms_exit(void); int dlm_lowcomms_close(int nodeid); -void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc); -void dlm_lowcomms_commit_buffer(void *mh); +struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, char **ppc, + void (*cb)(void *data), void *data); +void dlm_lowcomms_commit_msg(struct dlm_msg *msg); +void dlm_lowcomms_put_msg(struct dlm_msg *msg); +int dlm_lowcomms_resend_msg(struct dlm_msg *msg); int dlm_lowcomms_connect_node(int nodeid); -int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len); +int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark); +int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr); +void dlm_midcomms_receive_done(int nodeid); +struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void); +struct kmem_cache *dlm_lowcomms_msg_cache_create(void); #endif /* __LOWCOMMS_DOT_H__ */ diff --git a/fs/dlm/lvb_table.h b/fs/dlm/lvb_table.h index cc3e92f3feef..09052d967174 100644 --- a/fs/dlm/lvb_table.h +++ b/fs/dlm/lvb_table.h @@ -1,11 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005 Red Hat, Inc. All rights reserved. 
** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ diff --git a/fs/dlm/main.c b/fs/dlm/main.c index 079c0bd71ab7..a44d16da7187 100644 --- a/fs/dlm/main.c +++ b/fs/dlm/main.c @@ -1,23 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ +#include <linux/module.h> + #include "dlm_internal.h" #include "lockspace.h" #include "lock.h" #include "user.h" #include "memory.h" #include "config.h" -#include "lowcomms.h" +#include "midcomms.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/dlm.h> + +struct workqueue_struct *dlm_wq; static int __init init_dlm(void) { @@ -27,6 +32,8 @@ static int __init init_dlm(void) if (error) goto out; + dlm_midcomms_init(); + error = dlm_lockspace_init(); if (error) goto out_mem; @@ -35,37 +42,37 @@ static int __init init_dlm(void) if (error) goto out_lockspace; - error = dlm_register_debugfs(); - if (error) - goto out_config; + dlm_register_debugfs(); error = dlm_user_init(); if (error) goto out_debug; - error = dlm_netlink_init(); + error = dlm_plock_init(); if (error) goto out_user; - error = dlm_plock_init(); - if (error) - goto out_netlink; + dlm_wq = alloc_workqueue("dlm_wq", WQ_PERCPU, 0); + if (!dlm_wq) { + error = -ENOMEM; + goto out_plock; + } printk("DLM installed\n"); return 0; - out_netlink: - dlm_netlink_exit(); + out_plock: + dlm_plock_exit(); out_user: dlm_user_exit(); out_debug: dlm_unregister_debugfs(); - out_config: dlm_config_exit(); out_lockspace: dlm_lockspace_exit(); out_mem: + dlm_midcomms_exit(); dlm_memory_exit(); out: return error; @@ -73,14 +80,15 @@ static int __init init_dlm(void) static void __exit exit_dlm(void) { + /* be sure every pending work e.g. freeing is done */ + destroy_workqueue(dlm_wq); dlm_plock_exit(); - dlm_netlink_exit(); dlm_user_exit(); dlm_config_exit(); - dlm_memory_exit(); dlm_lockspace_exit(); - dlm_lowcomms_exit(); + dlm_midcomms_exit(); dlm_unregister_debugfs(); + dlm_memory_exit(); } module_init(init_dlm); diff --git a/fs/dlm/member.c b/fs/dlm/member.c index 476557b54921..c0f557a80a75 100644 --- a/fs/dlm/member.c +++ b/fs/dlm/member.c @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
** ******************************************************************************* ******************************************************************************/ @@ -17,11 +15,12 @@ #include "recover.h" #include "rcom.h" #include "config.h" +#include "midcomms.h" #include "lowcomms.h" -int dlm_slots_version(struct dlm_header *h) +int dlm_slots_version(const struct dlm_header *h) { - if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS) + if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS) return 0; return 1; } @@ -60,18 +59,15 @@ void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc) #define SLOT_DEBUG_LINE 128 -static void log_debug_slots(struct dlm_ls *ls, uint32_t gen, int num_slots, - struct rcom_slot *ro0, struct dlm_slot *array, - int array_size) +static void log_slots(struct dlm_ls *ls, uint32_t gen, int num_slots, + struct rcom_slot *ro0, struct dlm_slot *array, + int array_size) { char line[SLOT_DEBUG_LINE]; int len = SLOT_DEBUG_LINE - 1; int pos = 0; int ret, i; - if (!dlm_config.ci_log_debug) - return; - memset(line, 0, sizeof(line)); if (array) { @@ -95,7 +91,7 @@ static void log_debug_slots(struct dlm_ls *ls, uint32_t gen, int num_slots, } } - log_debug(ls, "generation %u slots %d%s", gen, num_slots, line); + log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line); } int dlm_slots_copy_in(struct dlm_ls *ls) @@ -124,18 +120,13 @@ int dlm_slots_copy_in(struct dlm_ls *ls) ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config)); - for (i = 0, ro = ro0; i < num_slots; i++, ro++) { - ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid); - ro->ro_slot = le16_to_cpu(ro->ro_slot); - } - - log_debug_slots(ls, gen, num_slots, ro0, NULL, 0); + log_slots(ls, gen, num_slots, ro0, NULL, 0); list_for_each_entry(memb, &ls->ls_nodes, list) { for (i = 0, ro = ro0; i < num_slots; i++, ro++) { - if (ro->ro_nodeid != memb->nodeid) + if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid) continue; - memb->slot = ro->ro_slot; + memb->slot = le16_to_cpu(ro->ro_slot); memb->slot_prev = memb->slot; break; } @@ -220,8 +211,7 @@ int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size, } array_size = max + need; - - array = kzalloc(array_size * sizeof(struct dlm_slot), GFP_NOFS); + array = kcalloc(array_size, sizeof(*array), GFP_NOFS); if (!array) return -ENOMEM; @@ -274,9 +264,9 @@ int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size, gen++; - log_debug_slots(ls, gen, num, NULL, array, array_size); + log_slots(ls, gen, num, NULL, array, array_size); - max_slots = (dlm_config.ci_buffer_size - sizeof(struct dlm_rcom) - + max_slots = (DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom) - sizeof(struct rcom_config)) / sizeof(struct rcom_slot); if (num > max_slots) { @@ -317,24 +307,40 @@ static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new) } } +static int add_remote_member(int nodeid) +{ + int error; + + if (nodeid == dlm_our_nodeid()) + return 0; + + error = dlm_lowcomms_connect_node(nodeid); + if (error < 0) + return error; + + dlm_midcomms_add_member(nodeid); + return 0; +} + static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node) { struct dlm_member *memb; int error; - memb = kzalloc(sizeof(struct dlm_member), GFP_NOFS); + memb = kzalloc(sizeof(*memb), GFP_NOFS); if (!memb) return -ENOMEM; - error = dlm_lowcomms_connect_node(node->nodeid); + memb->nodeid = node->nodeid; + memb->weight = node->weight; + memb->comm_seq = node->comm_seq; + + error = add_remote_member(node->nodeid); if (error < 0) { kfree(memb); 
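+		/* add_remote_member() failed, so there is nothing to undo on
+		 * the midcomms side; the pairing is strictly
+		 *	add:	dlm_lowcomms_connect_node() +
+		 *		dlm_midcomms_add_member()
+		 *	remove:	dlm_midcomms_remove_member()
+		 * (see remove_remote_member() below), and both sides skip our
+		 * own nodeid
+		 */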
return error; } - memb->nodeid = node->nodeid; - memb->weight = node->weight; - memb->comm_seq = node->comm_seq; add_ordered_member(ls, memb); ls->ls_num_nodes++; return 0; @@ -360,31 +366,44 @@ int dlm_is_member(struct dlm_ls *ls, int nodeid) int dlm_is_removed(struct dlm_ls *ls, int nodeid) { + WARN_ON_ONCE(!nodeid || nodeid == -1); + if (find_memb(&ls->ls_nodes_gone, nodeid)) return 1; return 0; } -static void clear_memb_list(struct list_head *head) +static void clear_memb_list(struct list_head *head, + void (*after_del)(int nodeid)) { struct dlm_member *memb; while (!list_empty(head)) { memb = list_entry(head->next, struct dlm_member, list); list_del(&memb->list); + if (after_del) + after_del(memb->nodeid); kfree(memb); } } +static void remove_remote_member(int nodeid) +{ + if (nodeid == dlm_our_nodeid()) + return; + + dlm_midcomms_remove_member(nodeid); +} + void dlm_clear_members(struct dlm_ls *ls) { - clear_memb_list(&ls->ls_nodes); + clear_memb_list(&ls->ls_nodes, remove_remote_member); ls->ls_num_nodes = 0; } void dlm_clear_members_gone(struct dlm_ls *ls) { - clear_memb_list(&ls->ls_nodes_gone); + clear_memb_list(&ls->ls_nodes_gone, NULL); } static void make_member_array(struct dlm_ls *ls) @@ -408,8 +427,7 @@ static void make_member_array(struct dlm_ls *ls) } ls->ls_total_weight = total; - - array = kmalloc(sizeof(int) * total, GFP_NOFS); + array = kmalloc_array(total, sizeof(*array), GFP_NOFS); if (!array) return; @@ -433,21 +451,22 @@ static void make_member_array(struct dlm_ls *ls) /* send a status request to all members just to establish comms connections */ -static int ping_members(struct dlm_ls *ls) +static int ping_members(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; int error = 0; list_for_each_entry(memb, &ls->ls_nodes, list) { - error = dlm_recovery_stopped(ls); - if (error) + if (dlm_recovery_stopped(ls)) { + error = -EINTR; break; - error = dlm_rcom_status(ls, memb->nodeid, 0); + } + error = dlm_rcom_status(ls, memb->nodeid, 0, seq); if (error) break; } if (error) - log_debug(ls, "ping_members aborted %d last nodeid %d", + log_rinfo(ls, "ping_members aborted %d last nodeid %d", error, ls->ls_recover_nodeid); return error; } @@ -459,7 +478,8 @@ static void dlm_lsop_recover_prep(struct dlm_ls *ls) ls->ls_ops->recover_prep(ls->ls_ops_arg); } -static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb) +static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb, + unsigned int release_recover) { struct dlm_slot slot; uint32_t seq; @@ -474,9 +494,9 @@ static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb) we consider the node to have failed (versus being removed due to dlm_release_lockspace) */ - error = dlm_comm_seq(memb->nodeid, &seq); + error = dlm_comm_seq(memb->nodeid, &seq, false); - if (!error && seq == memb->comm_seq) + if (!release_recover && !error && seq == memb->comm_seq) return; slot.nodeid = memb->nodeid; @@ -495,8 +515,7 @@ void dlm_lsop_recover_done(struct dlm_ls *ls) return; num = ls->ls_num_nodes; - - slots = kzalloc(num * sizeof(struct dlm_slot), GFP_KERNEL); + slots = kcalloc(num, sizeof(*slots), GFP_KERNEL); if (!slots) return; @@ -534,12 +553,17 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) struct dlm_member *memb, *safe; struct dlm_config_node *node; int i, error, neg = 0, low = -1; + unsigned int release_recover; /* previously removed members that we've not finished removing need to - count as a negative change so the "neg" recovery steps 
will happen */ + * count as a negative change so the "neg" recovery steps will happen + * + * This functionality must report all member changes to lsops or + * midcomms layer and must never return before. + */ list_for_each_entry(memb, &ls->ls_nodes_gone, list) { - log_debug(ls, "prev removed member %d", memb->nodeid); + log_rinfo(ls, "prev removed member %d", memb->nodeid); neg++; } @@ -547,31 +571,48 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) { node = find_config_node(rv, memb->nodeid); - if (node && !node->new) + if (!node) { + log_error(ls, "remove member %d invalid", + memb->nodeid); + return -EFAULT; + } + + if (!node->new && !node->gone) continue; - if (!node) { - log_debug(ls, "remove member %d", memb->nodeid); + release_recover = 0; + + if (node->gone) { + release_recover = node->release_recover; + log_rinfo(ls, "remove member %d%s", memb->nodeid, + release_recover ? " (release_recover)" : ""); } else { /* removed and re-added */ - log_debug(ls, "remove member %d comm_seq %u %u", + log_rinfo(ls, "remove member %d comm_seq %u %u", memb->nodeid, memb->comm_seq, node->comm_seq); } neg++; list_move(&memb->list, &ls->ls_nodes_gone); + remove_remote_member(memb->nodeid); ls->ls_num_nodes--; - dlm_lsop_recover_slot(ls, memb); + dlm_lsop_recover_slot(ls, memb, release_recover); } /* add new members to ls_nodes */ for (i = 0; i < rv->nodes_count; i++) { node = &rv->nodes[i]; + if (node->gone) + continue; + if (dlm_is_member(ls, node->nodeid)) continue; - dlm_add_member(ls, node); - log_debug(ls, "add member %d", node->nodeid); + error = dlm_add_member(ls, node); + if (error) + return error; + + log_rinfo(ls, "add member %d", node->nodeid); } list_for_each_entry(memb, &ls->ls_nodes, list) { @@ -583,15 +624,8 @@ int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out) make_member_array(ls); *neg_out = neg; - error = ping_members(ls); - if (!error || error == -EPROTO) { - /* new_lockspace() may be waiting to know if the config - is good or bad */ - ls->ls_members_result = error; - complete(&ls->ls_members_done); - } - - log_debug(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes); + error = ping_members(ls, rv->seq); + log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes); return error; } @@ -613,7 +647,7 @@ int dlm_ls_stop(struct dlm_ls *ls) * message to the requestqueue without races. */ - down_write(&ls->ls_recv_active); + write_lock_bh(&ls->ls_recv_active); /* * Abort any recovery that's in progress (see RECOVER_STOP, @@ -621,18 +655,25 @@ int dlm_ls_stop(struct dlm_ls *ls) * dlm to quit any processing (see RUNNING, dlm_locking_stopped()). */ - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); set_bit(LSFL_RECOVER_STOP, &ls->ls_flags); new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags); + if (new) + timer_delete_sync(&ls->ls_scan_timer); ls->ls_recover_seq++; - spin_unlock(&ls->ls_recover_lock); + + /* activate requestqueue and stop processing */ + write_lock_bh(&ls->ls_requestqueue_lock); + set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); + write_unlock_bh(&ls->ls_requestqueue_lock); + spin_unlock_bh(&ls->ls_recover_lock); /* * Let dlm_recv run again, now any normal messages will be saved on the * requestqueue for later. 
*/ - up_write(&ls->ls_recv_active); + write_unlock_bh(&ls->ls_recv_active); /* * This in_recovery lock does two things: @@ -657,43 +698,52 @@ int dlm_ls_stop(struct dlm_ls *ls) dlm_recoverd_suspend(ls); - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); kfree(ls->ls_slots); ls->ls_slots = NULL; ls->ls_num_slots = 0; ls->ls_slots_size = 0; ls->ls_recover_status = 0; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); dlm_recoverd_resume(ls); if (!ls->ls_recover_begin) ls->ls_recover_begin = jiffies; - dlm_lsop_recover_prep(ls); + /* call recover_prep ops only once and not multiple times + * for each possible dlm_ls_stop() when recovery is already + * stopped. + * + * If we successful was able to clear LSFL_RUNNING bit and + * it was set we know it is the first dlm_ls_stop() call. + */ + if (new) + dlm_lsop_recover_prep(ls); + return 0; } int dlm_ls_start(struct dlm_ls *ls) { - struct dlm_recover *rv = NULL, *rv_old; - struct dlm_config_node *nodes; + struct dlm_recover *rv, *rv_old; + struct dlm_config_node *nodes = NULL; int error, count; - rv = kzalloc(sizeof(struct dlm_recover), GFP_NOFS); + rv = kzalloc(sizeof(*rv), GFP_NOFS); if (!rv) return -ENOMEM; error = dlm_config_nodes(ls->ls_name, &nodes, &count); if (error < 0) - goto fail; + goto fail_rv; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); /* the lockspace needs to be stopped before it can be started */ if (!dlm_locking_stopped(ls)) { - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); log_error(ls, "start ignored: lockspace running"); error = -EINVAL; goto fail; @@ -704,7 +754,7 @@ int dlm_ls_start(struct dlm_ls *ls) rv->seq = ++ls->ls_recover_seq; rv_old = ls->ls_recover_args; ls->ls_recover_args = rv; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); if (rv_old) { log_error(ls, "unused recovery %llx %d", @@ -718,8 +768,9 @@ int dlm_ls_start(struct dlm_ls *ls) return 0; fail: - kfree(rv); kfree(nodes); + fail_rv: + kfree(rv); return error; } diff --git a/fs/dlm/member.h b/fs/dlm/member.h index 3deb70661c69..f61cfde46314 100644 --- a/fs/dlm/member.h +++ b/fs/dlm/member.h @@ -1,11 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
** ******************************************************************************* ******************************************************************************/ @@ -20,7 +18,7 @@ void dlm_clear_members_gone(struct dlm_ls *ls); int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv,int *neg_out); int dlm_is_removed(struct dlm_ls *ls, int nodeid); int dlm_is_member(struct dlm_ls *ls, int nodeid); -int dlm_slots_version(struct dlm_header *h); +int dlm_slots_version(const struct dlm_header *h); void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc, struct dlm_member *memb); void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc); diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c index 7cd24bccd4fe..5c35cc67aca4 100644 --- a/fs/dlm/memory.c +++ b/fs/dlm/memory.c @@ -1,55 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" +#include "midcomms.h" +#include "lowcomms.h" #include "config.h" #include "memory.h" +#include "ast.h" +static struct kmem_cache *writequeue_cache; +static struct kmem_cache *mhandle_cache; +static struct kmem_cache *msg_cache; static struct kmem_cache *lkb_cache; static struct kmem_cache *rsb_cache; +static struct kmem_cache *cb_cache; int __init dlm_memory_init(void) { + writequeue_cache = dlm_lowcomms_writequeue_cache_create(); + if (!writequeue_cache) + goto out; + + mhandle_cache = dlm_midcomms_cache_create(); + if (!mhandle_cache) + goto mhandle; + lkb_cache = kmem_cache_create("dlm_lkb", sizeof(struct dlm_lkb), __alignof__(struct dlm_lkb), 0, NULL); if (!lkb_cache) - return -ENOMEM; + goto lkb; + + msg_cache = dlm_lowcomms_msg_cache_create(); + if (!msg_cache) + goto msg; rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb), __alignof__(struct dlm_rsb), 0, NULL); - if (!rsb_cache) { - kmem_cache_destroy(lkb_cache); - return -ENOMEM; - } + if (!rsb_cache) + goto rsb; + + cb_cache = kmem_cache_create("dlm_cb", sizeof(struct dlm_callback), + __alignof__(struct dlm_callback), 0, + NULL); + if (!cb_cache) + goto cb; return 0; + +cb: + kmem_cache_destroy(rsb_cache); +rsb: + kmem_cache_destroy(msg_cache); +msg: + kmem_cache_destroy(lkb_cache); +lkb: + kmem_cache_destroy(mhandle_cache); +mhandle: + kmem_cache_destroy(writequeue_cache); +out: + return -ENOMEM; } void dlm_memory_exit(void) { - if (lkb_cache) - kmem_cache_destroy(lkb_cache); - if (rsb_cache) - kmem_cache_destroy(rsb_cache); + rcu_barrier(); + + kmem_cache_destroy(writequeue_cache); + kmem_cache_destroy(mhandle_cache); + kmem_cache_destroy(msg_cache); + kmem_cache_destroy(lkb_cache); + kmem_cache_destroy(rsb_cache); + kmem_cache_destroy(cb_cache); } char *dlm_allocate_lvb(struct dlm_ls *ls) { - char *p; - - p = kzalloc(ls->ls_lvblen, GFP_NOFS); - return p; + return kzalloc(ls->ls_lvblen, GFP_ATOMIC); } void dlm_free_lvb(char *p) @@ -57,40 +92,86 @@ void dlm_free_lvb(char *p) kfree(p); } -struct dlm_rsb 
*dlm_allocate_rsb(struct dlm_ls *ls) +struct dlm_rsb *dlm_allocate_rsb(void) { - struct dlm_rsb *r; - - r = kmem_cache_zalloc(rsb_cache, GFP_NOFS); - return r; + return kmem_cache_zalloc(rsb_cache, GFP_ATOMIC); } -void dlm_free_rsb(struct dlm_rsb *r) +static void __free_rsb_rcu(struct rcu_head *rcu) { + struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu); if (r->res_lvbptr) dlm_free_lvb(r->res_lvbptr); kmem_cache_free(rsb_cache, r); } -struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls) +void dlm_free_rsb(struct dlm_rsb *r) { - struct dlm_lkb *lkb; + call_rcu(&r->rcu, __free_rsb_rcu); +} - lkb = kmem_cache_zalloc(lkb_cache, GFP_NOFS); - return lkb; +struct dlm_lkb *dlm_allocate_lkb(void) +{ + return kmem_cache_zalloc(lkb_cache, GFP_ATOMIC); } -void dlm_free_lkb(struct dlm_lkb *lkb) +static void __free_lkb_rcu(struct rcu_head *rcu) { - if (lkb->lkb_flags & DLM_IFL_USER) { + struct dlm_lkb *lkb = container_of(rcu, struct dlm_lkb, rcu); + + if (test_bit(DLM_DFL_USER_BIT, &lkb->lkb_dflags)) { struct dlm_user_args *ua; ua = lkb->lkb_ua; if (ua) { - if (ua->lksb.sb_lvbptr) - kfree(ua->lksb.sb_lvbptr); + kfree(ua->lksb.sb_lvbptr); kfree(ua); } } + kmem_cache_free(lkb_cache, lkb); } +void dlm_free_lkb(struct dlm_lkb *lkb) +{ + call_rcu(&lkb->rcu, __free_lkb_rcu); +} + +struct dlm_mhandle *dlm_allocate_mhandle(void) +{ + return kmem_cache_alloc(mhandle_cache, GFP_ATOMIC); +} + +void dlm_free_mhandle(struct dlm_mhandle *mhandle) +{ + kmem_cache_free(mhandle_cache, mhandle); +} + +struct writequeue_entry *dlm_allocate_writequeue(void) +{ + return kmem_cache_alloc(writequeue_cache, GFP_ATOMIC); +} + +void dlm_free_writequeue(struct writequeue_entry *writequeue) +{ + kmem_cache_free(writequeue_cache, writequeue); +} + +struct dlm_msg *dlm_allocate_msg(void) +{ + return kmem_cache_alloc(msg_cache, GFP_ATOMIC); +} + +void dlm_free_msg(struct dlm_msg *msg) +{ + kmem_cache_free(msg_cache, msg); +} + +struct dlm_callback *dlm_allocate_cb(void) +{ + return kmem_cache_alloc(cb_cache, GFP_ATOMIC); +} + +void dlm_free_cb(struct dlm_callback *cb) +{ + kmem_cache_free(cb_cache, cb); +} diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h index 177c11cbb0a6..551b6b788489 100644 --- a/fs/dlm/memory.h +++ b/fs/dlm/memory.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
**
*******************************************************************************
******************************************************************************/
@@ -16,12 +14,20 @@
 int dlm_memory_init(void);
 void dlm_memory_exit(void);
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls);
+struct dlm_rsb *dlm_allocate_rsb(void);
 void dlm_free_rsb(struct dlm_rsb *r);
-struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
+struct dlm_lkb *dlm_allocate_lkb(void);
 void dlm_free_lkb(struct dlm_lkb *l);
 char *dlm_allocate_lvb(struct dlm_ls *ls);
 void dlm_free_lvb(char *l);
+struct dlm_mhandle *dlm_allocate_mhandle(void);
+void dlm_free_mhandle(struct dlm_mhandle *mhandle);
+struct writequeue_entry *dlm_allocate_writequeue(void);
+void dlm_free_writequeue(struct writequeue_entry *writequeue);
+struct dlm_msg *dlm_allocate_msg(void);
+void dlm_free_msg(struct dlm_msg *msg);
+struct dlm_callback *dlm_allocate_cb(void);
+void dlm_free_cb(struct dlm_callback *cb);
 #endif		/* __MEMORY_DOT_H__ */
diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
index f3396c622aec..2c101bbe261a 100644
--- a/fs/dlm/midcomms.c
+++ b/fs/dlm/midcomms.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /******************************************************************************
 *******************************************************************************
 **
 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
-**  Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
+**  Copyright (C) 2004-2021 Red Hat, Inc.  All rights reserved.
 **
-**  This copyrighted material is made available to anyone wishing to use,
-**  modify, copy, or redistribute it subject to the terms and conditions
-**  of the GNU General Public License v.2.
 **
 *******************************************************************************
 ******************************************************************************/
@@ -14,124 +12,1498 @@
 /*
  * midcomms.c
  *
- * This is the appallingly named "mid-level" comms layer.
+ * This is the appallingly named "mid-level" comms layer. It provides an
+ * application-layer "reliable" delivery service on top of the lowcomms
+ * transport layer.
  *
- * Its purpose is to take packets from the "real" comms layer,
- * split them up into packets and pass them to the interested
- * part of the locking mechanism.
+ * How it works:
  *
- * It also takes messages from the locking layer, formats them
- * into packets and sends them to the comms layer.
+ * Each node keeps track of all sent DLM messages in a send_queue, tagged
+ * with a sequence number. The receiver sends a DLM_ACK message back for
+ * every DLM message it receives. If a reconnect happens in lowcomms we
+ * send all unacknowledged dlm messages again, and the receiving side drops
+ * any message it has already seen by comparing sequence numbers.
+ *
+ * How version detection works:
+ *
+ * Because dlm has pre-configured node addresses on every side, it is in
+ * its nature that every side connects at startup and starts to transmit
+ * dlm messages, which ends in a race. However, DLM_RCOM_NAMES, DLM_RCOM_STATUS
+ * and their replies are the first messages which are exchanged. For backwards
+ * compatibility these messages are not covered by the midcomms re-transmission
+ * layer; they have their own re-transmission handling in the dlm application
+ * layer. The version field of every node is set from these RCOM messages as
+ * soon as they arrive, while the node is not yet part of the nodes hash.
+ * There is also logic to detect a version mismatch if something weird is
+ * going on or the first message isn't an expected one.
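+ *
+ * A sketch of that gate, assuming a per-node version field that starts
+ * out as DLM_VERSION_NOT_SET (the field names follow this file, the
+ * control flow and is_expected_rcom() are illustrative only):
+ *
+ *	switch (node->version) {
+ *	case DLM_VERSION_NOT_SET:
+ *		if (!is_expected_rcom(p))	// hypothetical helper
+ *			goto mismatch;
+ *		node->version = le32_to_cpu(p->header.h_version);
+ *		break;
+ *	default:
+ *		if (le32_to_cpu(p->header.h_version) != node->version)
+ *			goto mismatch;
+ *		break;
+ *	}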
There exists also logic to detect version mismatched if something weird + * going on or the first messages isn't an expected one. + * + * Termination: + * + * The midcomms layer does a 4 way handshake for termination on DLM protocol + * like TCP supports it with half-closed socket support. SCTP doesn't support + * half-closed socket, so we do it on DLM layer. Also socket shutdown() can be + * interrupted by .e.g. tcp reset itself. Additional there exists the othercon + * paradigm in lowcomms which cannot be easily without breaking backwards + * compatibility. A node cannot send anything to another node when a DLM_FIN + * message was send. There exists additional logic to print a warning if + * DLM wants to do it. There exists a state handling like RFC 793 but reduced + * to termination only. The event "member removal event" describes the cluster + * manager removed the node from internal lists, at this point DLM does not + * send any message to the other node. There exists two cases: + * + * 1. The cluster member was removed and we received a FIN + * OR + * 2. We received a FIN but the member was not removed yet + * + * One of these cases will do the CLOSE_WAIT to LAST_ACK change. + * + * + * +---------+ + * | CLOSED | + * +---------+ + * | add member/receive RCOM version + * | detection msg + * V + * +---------+ + * | ESTAB | + * +---------+ + * CLOSE | | rcv FIN + * ------- | | ------- + * +---------+ snd FIN / \ snd ACK +---------+ + * | FIN |<----------------- ------------------>| CLOSE | + * | WAIT-1 |------------------ | WAIT | + * +---------+ rcv FIN \ +---------+ + * | rcv ACK of FIN ------- | CLOSE | member + * | -------------- snd ACK | ------- | removal + * V x V snd FIN V event + * +---------+ +---------+ +---------+ + * |FINWAIT-2| | CLOSING | | LAST-ACK| + * +---------+ +---------+ +---------+ + * | rcv ACK of FIN | rcv ACK of FIN | + * | rcv FIN -------------- | -------------- | + * | ------- x V x V + * \ snd ACK +---------+ +---------+ + * ------------------------>| CLOSED | | CLOSED | + * +---------+ +---------+ + * + * NOTE: any state can interrupted by midcomms_close() and state will be + * switched to CLOSED in case of fencing. There exists also some timeout + * handling when we receive the version detection RCOM messages which is + * made by observation. + * + * Future improvements: + * + * There exists some known issues/improvements of the dlm handling. Some + * of them should be done in a next major dlm version bump which makes + * it incompatible with previous versions. + * + * Unaligned memory access: + * + * There exists cases when the dlm message buffer length is not aligned + * to 8 byte. However seems nobody detected any problem with it. This + * can be fixed in the next major version bump of dlm. + * + * Version detection: + * + * The version detection and how it's done is related to backwards + * compatibility. There exists better ways to make a better handling. + * However this should be changed in the next major version bump of dlm. + * + * Tail Size checking: + * + * There exists a message tail payload in e.g. DLM_MSG however we don't + * check it against the message length yet regarding to the receive buffer + * length. That need to be validated. + * + * Fencing bad nodes: + * + * At timeout places or weird sequence number behaviours we should send + * a fencing request to the cluster manager. */ +/* Debug switch to enable a 5 seconds sleep waiting of a termination. + * This can be useful to test fencing while termination is running. 
+ * This requires a setup with only gfs2 as dlm user, so that the + * last umount will terminate the connection. + * + * However it became useful to test, while the 5 seconds block in umount + * just press the reset button. In a lot of dropping the termination + * process can could take several seconds. + */ +#define DLM_DEBUG_FENCE_TERMINATION 0 + +#include <trace/events/dlm.h> +#include <net/tcp.h> + #include "dlm_internal.h" #include "lowcomms.h" #include "config.h" +#include "memory.h" #include "lock.h" +#include "util.h" #include "midcomms.h" +/* init value for sequence numbers for testing purpose only e.g. overflows */ +#define DLM_SEQ_INIT 0 +/* 5 seconds wait to sync ending of dlm */ +#define DLM_SHUTDOWN_TIMEOUT msecs_to_jiffies(5000) +#define DLM_VERSION_NOT_SET 0 +#define DLM_SEND_ACK_BACK_MSG_THRESHOLD 32 +#define DLM_RECV_ACK_BACK_MSG_THRESHOLD (DLM_SEND_ACK_BACK_MSG_THRESHOLD * 8) + +struct midcomms_node { + int nodeid; + uint32_t version; + atomic_t seq_send; + atomic_t seq_next; + /* These queues are unbound because we cannot drop any message in dlm. + * We could send a fence signal for a specific node to the cluster + * manager if queues hits some maximum value, however this handling + * not supported yet. + */ + struct list_head send_queue; + spinlock_t send_queue_lock; + atomic_t send_queue_cnt; +#define DLM_NODE_FLAG_CLOSE 1 +#define DLM_NODE_FLAG_STOP_TX 2 +#define DLM_NODE_FLAG_STOP_RX 3 + atomic_t ulp_delivered; + unsigned long flags; + wait_queue_head_t shutdown_wait; + + /* dlm tcp termination state */ +#define DLM_CLOSED 1 +#define DLM_ESTABLISHED 2 +#define DLM_FIN_WAIT1 3 +#define DLM_FIN_WAIT2 4 +#define DLM_CLOSE_WAIT 5 +#define DLM_LAST_ACK 6 +#define DLM_CLOSING 7 + int state; + spinlock_t state_lock; -static void copy_from_cb(void *dst, const void *base, unsigned offset, - unsigned len, unsigned limit) + /* counts how many lockspaces are using this node + * this refcount is necessary to determine if the + * node wants to disconnect. + */ + int users; + + /* not protected by srcu, node_hash lifetime */ + void *debugfs; + + struct hlist_node hlist; + struct rcu_head rcu; +}; + +struct dlm_mhandle { + const union dlm_packet *inner_p; + struct midcomms_node *node; + struct dlm_opts *opts; + struct dlm_msg *msg; + bool committed; + uint32_t seq; + + void (*ack_rcv)(struct midcomms_node *node); + + /* get_mhandle/commit srcu idx exchange */ + int idx; + + struct list_head list; + struct rcu_head rcu; +}; + +static struct hlist_head node_hash[CONN_HASH_SIZE]; +static DEFINE_SPINLOCK(nodes_lock); +DEFINE_STATIC_SRCU(nodes_srcu); + +/* This mutex prevents that midcomms_close() is running while + * stop() or remove(). As I experienced invalid memory access + * behaviours when DLM_DEBUG_FENCE_TERMINATION is enabled and + * resetting machines. I will end in some double deletion in nodes + * datastructure. 
+ */ +static DEFINE_MUTEX(close_lock); + +struct kmem_cache *dlm_midcomms_cache_create(void) { - unsigned copy = len; + return KMEM_CACHE(dlm_mhandle, 0); +} - if ((copy + offset) > limit) - copy = limit - offset; - memcpy(dst, base + offset, copy); - len -= copy; - if (len) - memcpy(dst + copy, base, len); +static inline const char *dlm_state_str(int state) +{ + switch (state) { + case DLM_CLOSED: + return "CLOSED"; + case DLM_ESTABLISHED: + return "ESTABLISHED"; + case DLM_FIN_WAIT1: + return "FIN_WAIT1"; + case DLM_FIN_WAIT2: + return "FIN_WAIT2"; + case DLM_CLOSE_WAIT: + return "CLOSE_WAIT"; + case DLM_LAST_ACK: + return "LAST_ACK"; + case DLM_CLOSING: + return "CLOSING"; + default: + return "UNKNOWN"; + } } -/* - * Called from the low-level comms layer to process a buffer of - * commands. - * - * Only complete messages are processed here, any "spare" bytes from - * the end of a buffer are saved and tacked onto the front of the next - * message that comes in. I doubt this will happen very often but we - * need to be able to cope with it and I don't want the task to be waiting - * for packets to come in when there is useful work to be done. - */ +const char *dlm_midcomms_state(struct midcomms_node *node) +{ + return dlm_state_str(node->state); +} -int dlm_process_incoming_buffer(int nodeid, const void *base, - unsigned offset, unsigned len, unsigned limit) +unsigned long dlm_midcomms_flags(struct midcomms_node *node) { - union { - unsigned char __buf[DLM_INBUF_LEN]; - /* this is to force proper alignment on some arches */ - union dlm_packet p; - } __tmp; - union dlm_packet *p = &__tmp.p; - int ret = 0; - int err = 0; - uint16_t msglen; - uint32_t lockspace; + return node->flags; +} + +int dlm_midcomms_send_queue_cnt(struct midcomms_node *node) +{ + return atomic_read(&node->send_queue_cnt); +} - while (len > sizeof(struct dlm_header)) { +uint32_t dlm_midcomms_version(struct midcomms_node *node) +{ + return node->version; +} - /* Copy just the header to check the total length. The - message may wrap around the end of the buffer back to the - start, so we need to use a temp buffer and copy_from_cb. 
*/ +static struct midcomms_node *__find_node(int nodeid, int r) +{ + struct midcomms_node *node; - copy_from_cb(p, base, offset, sizeof(struct dlm_header), - limit); + hlist_for_each_entry_rcu(node, &node_hash[r], hlist) { + if (node->nodeid == nodeid) + return node; + } - msglen = le16_to_cpu(p->header.h_length); - lockspace = p->header.h_lockspace; + return NULL; +} + +static void dlm_mhandle_release(struct rcu_head *rcu) +{ + struct dlm_mhandle *mh = container_of(rcu, struct dlm_mhandle, rcu); + + dlm_lowcomms_put_msg(mh->msg); + dlm_free_mhandle(mh); +} + +static void dlm_mhandle_delete(struct midcomms_node *node, + struct dlm_mhandle *mh) +{ + list_del_rcu(&mh->list); + atomic_dec(&node->send_queue_cnt); + call_rcu(&mh->rcu, dlm_mhandle_release); +} + +static void dlm_send_queue_flush(struct midcomms_node *node) +{ + struct dlm_mhandle *mh; + + pr_debug("flush midcomms send queue of node %d\n", node->nodeid); + + rcu_read_lock(); + spin_lock_bh(&node->send_queue_lock); + list_for_each_entry_rcu(mh, &node->send_queue, list) { + dlm_mhandle_delete(node, mh); + } + spin_unlock_bh(&node->send_queue_lock); + rcu_read_unlock(); +} + +static void midcomms_node_reset(struct midcomms_node *node) +{ + pr_debug("reset node %d\n", node->nodeid); + + atomic_set(&node->seq_next, DLM_SEQ_INIT); + atomic_set(&node->seq_send, DLM_SEQ_INIT); + atomic_set(&node->ulp_delivered, 0); + node->version = DLM_VERSION_NOT_SET; + node->flags = 0; + + dlm_send_queue_flush(node); + node->state = DLM_CLOSED; + wake_up(&node->shutdown_wait); +} - err = -EINVAL; - if (msglen < sizeof(struct dlm_header)) +static struct midcomms_node *nodeid2node(int nodeid) +{ + return __find_node(nodeid, nodeid_hash(nodeid)); +} + +int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr) +{ + int ret, idx, r = nodeid_hash(nodeid); + struct midcomms_node *node; + + ret = dlm_lowcomms_addr(nodeid, addr); + if (ret) + return ret; + + idx = srcu_read_lock(&nodes_srcu); + node = __find_node(nodeid, r); + if (node) { + srcu_read_unlock(&nodes_srcu, idx); + return 0; + } + srcu_read_unlock(&nodes_srcu, idx); + + node = kmalloc(sizeof(*node), GFP_NOFS); + if (!node) + return -ENOMEM; + + node->nodeid = nodeid; + spin_lock_init(&node->state_lock); + spin_lock_init(&node->send_queue_lock); + atomic_set(&node->send_queue_cnt, 0); + INIT_LIST_HEAD(&node->send_queue); + init_waitqueue_head(&node->shutdown_wait); + node->users = 0; + midcomms_node_reset(node); + + spin_lock_bh(&nodes_lock); + hlist_add_head_rcu(&node->hlist, &node_hash[r]); + spin_unlock_bh(&nodes_lock); + + node->debugfs = dlm_create_debug_comms_file(nodeid, node); + return 0; +} + +static int dlm_send_ack(int nodeid, uint32_t seq) +{ + int mb_len = sizeof(struct dlm_header); + struct dlm_header *m_header; + struct dlm_msg *msg; + char *ppc; + + msg = dlm_lowcomms_new_msg(nodeid, mb_len, &ppc, NULL, NULL); + if (!msg) + return -ENOMEM; + + m_header = (struct dlm_header *)ppc; + + m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); + m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); + m_header->h_length = cpu_to_le16(mb_len); + m_header->h_cmd = DLM_ACK; + m_header->u.h_seq = cpu_to_le32(seq); + + dlm_lowcomms_commit_msg(msg); + dlm_lowcomms_put_msg(msg); + + return 0; +} + +static void dlm_send_ack_threshold(struct midcomms_node *node, + uint32_t threshold) +{ + uint32_t oval, nval; + bool send_ack; + + /* let only send one user trigger threshold to send ack back */ + do { + oval = atomic_read(&node->ulp_delivered); + send_ack = (oval > threshold); + 
/* abort if threshold is not reached */ + if (!send_ack) break; - if (p->header.h_cmd == DLM_MSG) { - if (msglen < sizeof(struct dlm_message)) - break; + + nval = 0; + /* try to reset ulp_delivered counter */ + } while (atomic_cmpxchg(&node->ulp_delivered, oval, nval) != oval); + + if (send_ack) + dlm_send_ack(node->nodeid, atomic_read(&node->seq_next)); +} + +static int dlm_send_fin(struct midcomms_node *node, + void (*ack_rcv)(struct midcomms_node *node)) +{ + int mb_len = sizeof(struct dlm_header); + struct dlm_header *m_header; + struct dlm_mhandle *mh; + char *ppc; + + mh = dlm_midcomms_get_mhandle(node->nodeid, mb_len, &ppc); + if (!mh) + return -ENOMEM; + + set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags); + mh->ack_rcv = ack_rcv; + + m_header = (struct dlm_header *)ppc; + + m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); + m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid()); + m_header->h_length = cpu_to_le16(mb_len); + m_header->h_cmd = DLM_FIN; + + pr_debug("sending fin msg to node %d\n", node->nodeid); + dlm_midcomms_commit_mhandle(mh, NULL, 0); + + return 0; +} + +static void dlm_receive_ack(struct midcomms_node *node, uint32_t seq) +{ + struct dlm_mhandle *mh; + + rcu_read_lock(); + list_for_each_entry_rcu(mh, &node->send_queue, list) { + if (before(mh->seq, seq)) { + if (mh->ack_rcv) + mh->ack_rcv(node); + } else { + /* send queue should be ordered */ + break; + } + } + + spin_lock_bh(&node->send_queue_lock); + list_for_each_entry_rcu(mh, &node->send_queue, list) { + if (before(mh->seq, seq)) { + dlm_mhandle_delete(node, mh); } else { - if (msglen < sizeof(struct dlm_rcom)) + /* send queue should be ordered */ + break; + } + } + spin_unlock_bh(&node->send_queue_lock); + rcu_read_unlock(); +} + +static void dlm_pas_fin_ack_rcv(struct midcomms_node *node) +{ + spin_lock_bh(&node->state_lock); + pr_debug("receive passive fin ack from node %d with state %s\n", + node->nodeid, dlm_state_str(node->state)); + + switch (node->state) { + case DLM_LAST_ACK: + /* DLM_CLOSED */ + midcomms_node_reset(node); + break; + case DLM_CLOSED: + /* not valid but somehow we got what we want */ + wake_up(&node->shutdown_wait); + break; + default: + spin_unlock_bh(&node->state_lock); + log_print("%s: unexpected state: %d", + __func__, node->state); + WARN_ON_ONCE(1); + return; + } + spin_unlock_bh(&node->state_lock); +} + +static void dlm_receive_buffer_3_2_trace(uint32_t seq, + const union dlm_packet *p) +{ + switch (p->header.h_cmd) { + case DLM_MSG: + trace_dlm_recv_message(dlm_our_nodeid(), seq, &p->message); + break; + case DLM_RCOM: + trace_dlm_recv_rcom(dlm_our_nodeid(), seq, &p->rcom); + break; + default: + break; + } +} + +static void dlm_midcomms_receive_buffer(const union dlm_packet *p, + struct midcomms_node *node, + uint32_t seq) +{ + bool is_expected_seq; + uint32_t oval, nval; + + do { + oval = atomic_read(&node->seq_next); + is_expected_seq = (oval == seq); + if (!is_expected_seq) + break; + + nval = oval + 1; + } while (atomic_cmpxchg(&node->seq_next, oval, nval) != oval); + + if (is_expected_seq) { + switch (p->header.h_cmd) { + case DLM_FIN: + spin_lock_bh(&node->state_lock); + pr_debug("receive fin msg from node %d with state %s\n", + node->nodeid, dlm_state_str(node->state)); + + switch (node->state) { + case DLM_ESTABLISHED: + dlm_send_ack(node->nodeid, nval); + + /* passive shutdown DLM_LAST_ACK case 1 + * additional we check if the node is used by + * cluster manager events at all. 
+ */ + if (node->users == 0) { + node->state = DLM_LAST_ACK; + pr_debug("switch node %d to state %s case 1\n", + node->nodeid, dlm_state_str(node->state)); + set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); + dlm_send_fin(node, dlm_pas_fin_ack_rcv); + } else { + node->state = DLM_CLOSE_WAIT; + pr_debug("switch node %d to state %s\n", + node->nodeid, dlm_state_str(node->state)); + } + break; + case DLM_FIN_WAIT1: + dlm_send_ack(node->nodeid, nval); + node->state = DLM_CLOSING; + set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags); + pr_debug("switch node %d to state %s\n", + node->nodeid, dlm_state_str(node->state)); break; + case DLM_FIN_WAIT2: + dlm_send_ack(node->nodeid, nval); + midcomms_node_reset(node); + pr_debug("switch node %d to state %s\n", + node->nodeid, dlm_state_str(node->state)); + break; + case DLM_LAST_ACK: + /* probably remove_member caught it, do nothing */ + break; + default: + spin_unlock_bh(&node->state_lock); + log_print("%s: unexpected state: %d", + __func__, node->state); + WARN_ON_ONCE(1); + return; + } + spin_unlock_bh(&node->state_lock); + break; + default: + WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); + dlm_receive_buffer_3_2_trace(seq, p); + dlm_receive_buffer(p, node->nodeid); + atomic_inc(&node->ulp_delivered); + /* unlikely case to send ack back when we don't transmit */ + dlm_send_ack_threshold(node, DLM_RECV_ACK_BACK_MSG_THRESHOLD); + break; + } + } else { + /* retry to ack message which we already have by sending back + * current node->seq_next number as ack. + */ + if (seq < oval) + dlm_send_ack(node->nodeid, oval); + + log_print_ratelimited("ignore dlm msg because seq mismatch, seq: %u, expected: %u, nodeid: %d", + seq, oval, node->nodeid); + } +} + +static int dlm_opts_check_msglen(const union dlm_packet *p, uint16_t msglen, + int nodeid) +{ + int len = msglen; + + /* we only trust outer header msglen because + * it's checked against receive buffer length. 
+ */ + if (len < sizeof(struct dlm_opts)) + return -1; + len -= sizeof(struct dlm_opts); + + if (len < le16_to_cpu(p->opts.o_optlen)) + return -1; + len -= le16_to_cpu(p->opts.o_optlen); + + switch (p->opts.o_nextcmd) { + case DLM_FIN: + if (len < sizeof(struct dlm_header)) { + log_print("fin too small: %d, will skip this message from node %d", + len, nodeid); + return -1; } - err = -E2BIG; - if (msglen > dlm_config.ci_buffer_size) { - log_print("message size %d from %d too big, buf len %d", - msglen, nodeid, len); + + break; + case DLM_MSG: + if (len < sizeof(struct dlm_message)) { + log_print("msg too small: %d, will skip this message from node %d", + msglen, nodeid); + return -1; + } + + break; + case DLM_RCOM: + if (len < sizeof(struct dlm_rcom)) { + log_print("rcom msg too small: %d, will skip this message from node %d", + len, nodeid); + return -1; + } + + break; + default: + log_print("unsupported o_nextcmd received: %u, will skip this message from node %d", + p->opts.o_nextcmd, nodeid); + return -1; + } + + return 0; +} + +static void dlm_midcomms_receive_buffer_3_2(const union dlm_packet *p, int nodeid) +{ + uint16_t msglen = le16_to_cpu(p->header.h_length); + struct midcomms_node *node; + uint32_t seq; + int ret, idx; + + idx = srcu_read_lock(&nodes_srcu); + node = nodeid2node(nodeid); + if (WARN_ON_ONCE(!node)) + goto out; + + switch (node->version) { + case DLM_VERSION_NOT_SET: + node->version = DLM_VERSION_3_2; + wake_up(&node->shutdown_wait); + log_print("version 0x%08x for node %d detected", DLM_VERSION_3_2, + node->nodeid); + + spin_lock(&node->state_lock); + switch (node->state) { + case DLM_CLOSED: + node->state = DLM_ESTABLISHED; + pr_debug("switch node %d to state %s\n", + node->nodeid, dlm_state_str(node->state)); + break; + default: + break; + } + spin_unlock(&node->state_lock); + + break; + case DLM_VERSION_3_2: + break; + default: + log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x", + DLM_VERSION_3_2, node->nodeid, node->version); + goto out; + } + + switch (p->header.h_cmd) { + case DLM_RCOM: + /* these rcom message we use to determine version. + * they have their own retransmission handling and + * are the first messages of dlm. + * + * length already checked. + */ + switch (p->rcom.rc_type) { + case cpu_to_le32(DLM_RCOM_NAMES): + fallthrough; + case cpu_to_le32(DLM_RCOM_NAMES_REPLY): + fallthrough; + case cpu_to_le32(DLM_RCOM_STATUS): + fallthrough; + case cpu_to_le32(DLM_RCOM_STATUS_REPLY): break; + default: + log_print("unsupported rcom type received: %u, will skip this message from node %d", + le32_to_cpu(p->rcom.rc_type), nodeid); + goto out; } - err = 0; - /* If only part of the full message is contained in this - buffer, then do nothing and wait for lowcomms to call - us again later with more data. We return 0 meaning - we've consumed none of the input buffer. 
*/ + WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_RX, &node->flags)); + dlm_receive_buffer(p, nodeid); + break; + case DLM_OPTS: + seq = le32_to_cpu(p->header.u.h_seq); + + ret = dlm_opts_check_msglen(p, msglen, nodeid); + if (ret < 0) { + log_print("opts msg too small: %u, will skip this message from node %d", + msglen, nodeid); + goto out; + } + + p = (union dlm_packet *)((unsigned char *)p->opts.o_opts + + le16_to_cpu(p->opts.o_optlen)); + + /* recheck inner msglen just if it's not garbage */ + msglen = le16_to_cpu(p->header.h_length); + switch (p->header.h_cmd) { + case DLM_RCOM: + if (msglen < sizeof(struct dlm_rcom)) { + log_print("inner rcom msg too small: %u, will skip this message from node %d", + msglen, nodeid); + goto out; + } - if (msglen > len) break; + case DLM_MSG: + if (msglen < sizeof(struct dlm_message)) { + log_print("inner msg too small: %u, will skip this message from node %d", + msglen, nodeid); + goto out; + } - /* Allocate a larger temp buffer if the full message won't fit - in the buffer on the stack (which should work for most - ordinary messages). */ + break; + case DLM_FIN: + if (msglen < sizeof(struct dlm_header)) { + log_print("inner fin too small: %u, will skip this message from node %d", + msglen, nodeid); + goto out; + } - if (msglen > sizeof(__tmp) && p == &__tmp.p) { - p = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS); - if (p == NULL) - return ret; + break; + default: + log_print("unsupported inner h_cmd received: %u, will skip this message from node %d", + msglen, nodeid); + goto out; } - copy_from_cb(p, base, offset, msglen, limit); + dlm_midcomms_receive_buffer(p, node, seq); + break; + case DLM_ACK: + seq = le32_to_cpu(p->header.u.h_seq); + dlm_receive_ack(node, seq); + break; + default: + log_print("unsupported h_cmd received: %u, will skip this message from node %d", + p->header.h_cmd, nodeid); + break; + } + +out: + srcu_read_unlock(&nodes_srcu, idx); +} - BUG_ON(lockspace != p->header.h_lockspace); +static void dlm_midcomms_receive_buffer_3_1(const union dlm_packet *p, int nodeid) +{ + uint16_t msglen = le16_to_cpu(p->header.h_length); + struct midcomms_node *node; + int idx; + + idx = srcu_read_lock(&nodes_srcu); + node = nodeid2node(nodeid); + if (WARN_ON_ONCE(!node)) { + srcu_read_unlock(&nodes_srcu, idx); + return; + } + + switch (node->version) { + case DLM_VERSION_NOT_SET: + node->version = DLM_VERSION_3_1; + wake_up(&node->shutdown_wait); + log_print("version 0x%08x for node %d detected", DLM_VERSION_3_1, + node->nodeid); + break; + case DLM_VERSION_3_1: + break; + default: + log_print_ratelimited("version mismatch detected, assumed 0x%08x but node %d has 0x%08x", + DLM_VERSION_3_1, node->nodeid, node->version); + srcu_read_unlock(&nodes_srcu, idx); + return; + } + srcu_read_unlock(&nodes_srcu, idx); + + switch (p->header.h_cmd) { + case DLM_RCOM: + /* length already checked */ + break; + case DLM_MSG: + if (msglen < sizeof(struct dlm_message)) { + log_print("msg too small: %u, will skip this message from node %d", + msglen, nodeid); + return; + } + + break; + default: + log_print("unsupported h_cmd received: %u, will skip this message from node %d", + p->header.h_cmd, nodeid); + return; + } + + dlm_receive_buffer(p, nodeid); +} + +int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len) +{ + const unsigned char *ptr = buf; + const struct dlm_header *hd; + uint16_t msglen; + int ret = 0; + + while (len >= sizeof(struct dlm_header)) { + hd = (struct dlm_header *)ptr; + + /* no message should be more than DLM_MAX_SOCKET_BUFSIZE 
or
+	 * less than the dlm_header size.
+	 *
+	 * Some messages are not yet aligned to an 8 byte length boundary,
+	 * which can lead to unaligned memory accesses for some dlm
+	 * messages. However this problem needs to be fixed at the sending
+	 * side; so far nobody seems to have run into architecture related
+	 * issues, but it slows down some processing. Fixing this issue
+	 * should be scheduled in the future by doing the next major
+	 * version bump.
+	 */
+	msglen = le16_to_cpu(hd->h_length);
+	if (msglen > DLM_MAX_SOCKET_BUFSIZE ||
+	    msglen < sizeof(struct dlm_header)) {
+		log_print("received invalid length header: %u from node %d, will abort message parsing",
+			  msglen, nodeid);
+		return -EBADMSG;
+	}
+
+	/* the caller will take care that the leftover
+	 * is parsed on the next call with more data
+	 */
+	if (msglen > len)
+		break;
 		ret += msglen;
-		offset += msglen;
-		offset &= (limit - 1);
 		len -= msglen;
+		ptr += msglen;
+	}
-		dlm_receive_buffer(p, nodeid);
+	return ret;
+}
+
+/*
+ * Called from the low-level comms layer to process a buffer of
+ * commands.
+ */
+int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int len)
+{
+	const unsigned char *ptr = buf;
+	const struct dlm_header *hd;
+	uint16_t msglen;
+	int ret = 0;
+
+	while (len >= sizeof(struct dlm_header)) {
+		hd = (struct dlm_header *)ptr;
+
+		msglen = le16_to_cpu(hd->h_length);
+		if (msglen > len)
+			break;
+
+		switch (hd->h_version) {
+		case cpu_to_le32(DLM_VERSION_3_1):
+			dlm_midcomms_receive_buffer_3_1((const union dlm_packet *)ptr, nodeid);
+			break;
+		case cpu_to_le32(DLM_VERSION_3_2):
+			dlm_midcomms_receive_buffer_3_2((const union dlm_packet *)ptr, nodeid);
+			break;
+		default:
+			log_print("received invalid version header: %u from node %d, will skip this message",
+				  le32_to_cpu(hd->h_version), nodeid);
+			break;
+		}
+
+		ret += msglen;
+		len -= msglen;
+		ptr += msglen;
+	}
+
+	return ret;
+}
+
+void dlm_midcomms_unack_msg_resend(int nodeid)
+{
+	struct midcomms_node *node;
+	struct dlm_mhandle *mh;
+	int idx, ret;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	node = nodeid2node(nodeid);
+	if (WARN_ON_ONCE(!node)) {
+		srcu_read_unlock(&nodes_srcu, idx);
+		return;
+	}
+
+	/* old protocol, we don't support retransmission on failure */
+	switch (node->version) {
+	case DLM_VERSION_3_2:
+		break;
+	default:
+		srcu_read_unlock(&nodes_srcu, idx);
+		return;
+	}
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(mh, &node->send_queue, list) {
+		if (!mh->committed)
+			continue;
+
+		ret = dlm_lowcomms_resend_msg(mh->msg);
+		if (!ret)
+			log_print_ratelimited("retransmit dlm msg, seq %u, nodeid %d",
+					      mh->seq, node->nodeid);
+	}
+	rcu_read_unlock();
+	srcu_read_unlock(&nodes_srcu, idx);
+}
+
+static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
+				 uint32_t seq)
+{
+	opts->o_header.h_cmd = DLM_OPTS;
+	opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+	opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+	opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
+	opts->o_header.u.h_seq = cpu_to_le32(seq);
+}
+
+static void midcomms_new_msg_cb(void *data)
+{
+	struct dlm_mhandle *mh = data;
+
+	atomic_inc(&mh->node->send_queue_cnt);
+
+	spin_lock_bh(&mh->node->send_queue_lock);
+	list_add_tail_rcu(&mh->list, &mh->node->send_queue);
+	spin_unlock_bh(&mh->node->send_queue_lock);
+
+	mh->seq = atomic_fetch_inc(&mh->node->seq_send);
+}
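The two helpers above are the heart of the 3.2 framing: midcomms_new_msg_cb() assigns the sequence number and queues the handle for retransmission, and dlm_fill_opts_header() writes the outer DLM_OPTS header that carries that number on the wire. As a minimal, self-contained sketch of the resulting frame layout (the struct is a simplified stand-in, not the definition from dlm_internal.h; endianness conversion is omitted):

/* Illustration only: an outer DLM_OPTS-style header carrying the ack
 * sequence number, followed by the inner DLM_MSG/DLM_RCOM payload.
 */
#include <stdint.h>
#include <string.h>

struct demo_header {
	uint32_t h_version;
	uint32_t h_nodeid;
	uint16_t h_length;
	uint8_t  h_cmd;		/* DLM_OPTS for the outer frame */
	uint8_t  h_pad;
	uint32_t h_seq;		/* ack/retransmit sequence number */
};

/* frame one inner message at buf, returning the total frame length */
static size_t demo_frame_opts(uint8_t *buf, const void *inner,
			      uint16_t inner_len, uint32_t seq,
			      uint8_t opts_cmd)
{
	struct demo_header hdr = {
		.h_version = 0x00030002,	/* DLM_VERSION_3_2 */
		.h_length  = (uint16_t)(sizeof(hdr) + inner_len),
		.h_cmd     = opts_cmd,		/* hypothetical DLM_OPTS value */
		.h_seq     = seq,
	};

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), inner, inner_len);
	return sizeof(hdr) + inner_len;
}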
+static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int nodeid,
+						int len, char **ppc)
+{
+	struct dlm_opts *opts;
+	struct dlm_msg *msg;
+
+	msg = dlm_lowcomms_new_msg(nodeid, len + DLM_MIDCOMMS_OPT_LEN,
+				   ppc, midcomms_new_msg_cb, mh);
+	if (!msg)
+		return NULL;
+
+	opts = (struct dlm_opts *)*ppc;
+	mh->opts = opts;
+
+	/* add possible options here */
+	dlm_fill_opts_header(opts, len, mh->seq);
+
+	*ppc += sizeof(*opts);
+	mh->inner_p = (const union dlm_packet *)*ppc;
+	return msg;
+}
+
+/* avoid a false positive for nodes_srcu: the unlock happens in
+ * dlm_midcomms_commit_mhandle(), which must be called on success
+ */
+#ifndef __CHECKER__
+struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc)
+{
+	struct midcomms_node *node;
+	struct dlm_mhandle *mh;
+	struct dlm_msg *msg;
+	int idx;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	node = nodeid2node(nodeid);
+	if (WARN_ON_ONCE(!node))
+		goto err;
+
+	/* this is a bug, however we go on and hope it will be resolved */
+	WARN_ON_ONCE(test_bit(DLM_NODE_FLAG_STOP_TX, &node->flags));
+
+	mh = dlm_allocate_mhandle();
+	if (!mh)
+		goto err;
+
+	mh->committed = false;
+	mh->ack_rcv = NULL;
+	mh->idx = idx;
+	mh->node = node;
+
+	switch (node->version) {
+	case DLM_VERSION_3_1:
+		msg = dlm_lowcomms_new_msg(nodeid, len, ppc, NULL, NULL);
+		if (!msg) {
+			dlm_free_mhandle(mh);
+			goto err;
+		}
+
+		break;
+	case DLM_VERSION_3_2:
+		/* send ack back if necessary */
+		dlm_send_ack_threshold(node, DLM_SEND_ACK_BACK_MSG_THRESHOLD);
+
+		msg = dlm_midcomms_get_msg_3_2(mh, nodeid, len, ppc);
+		if (!msg) {
+			dlm_free_mhandle(mh);
+			goto err;
+		}
+		break;
+	default:
+		dlm_free_mhandle(mh);
+		WARN_ON_ONCE(1);
+		goto err;
+	}
+
+	mh->msg = msg;
+
+	/* keep in mind that it is a must to call
+	 * dlm_midcomms_commit_mhandle(), which releases
+	 * nodes_srcu using mh->idx; it is assumed here
+	 * that the application will call it.
+	 */
+	return mh;
+
+err:
+	srcu_read_unlock(&nodes_srcu, idx);
+	return NULL;
+}
+#endif
+
+static void dlm_midcomms_commit_msg_3_2_trace(const struct dlm_mhandle *mh,
+					      const void *name, int namelen)
+{
+	switch (mh->inner_p->header.h_cmd) {
+	case DLM_MSG:
+		trace_dlm_send_message(mh->node->nodeid, mh->seq,
+				       &mh->inner_p->message,
+				       name, namelen);
+		break;
+	case DLM_RCOM:
+		trace_dlm_send_rcom(mh->node->nodeid, mh->seq,
+				    &mh->inner_p->rcom);
+		break;
+	default:
+		/* nothing to trace */
+		break;
+	}
+}
+
+static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh,
+					const void *name, int namelen)
+{
+	/* nexthdr chain for fast lookup */
+	mh->opts->o_nextcmd = mh->inner_p->header.h_cmd;
+	mh->committed = true;
+	dlm_midcomms_commit_msg_3_2_trace(mh, name, namelen);
+	dlm_lowcomms_commit_msg(mh->msg);
+}
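dlm_midcomms_get_mhandle() returns with nodes_srcu read-locked and dlm_midcomms_commit_mhandle() releases it, so the two calls must always be paired. A caller-side sketch of that contract, assuming only the declarations in midcomms.h; demo_send_small_msg() is hypothetical, error paths are elided and the payload is assumed to already be a fully formed dlm packet of len bytes:

static int demo_send_small_msg(int nodeid, const void *packet, int len)
{
	struct dlm_mhandle *mh;
	char *ppc;

	/* on success nodes_srcu is read-locked until the commit below */
	mh = dlm_midcomms_get_mhandle(nodeid, len, &ppc);
	if (!mh)
		return -ENOMEM;	/* srcu is not held on failure */

	memcpy(ppc, packet, len);	/* fill the message in place */

	/* commits the message, leaves it queued for a possible retransmit
	 * (3.2 only) and drops the srcu read lock taken by get_mhandle()
	 */
	dlm_midcomms_commit_mhandle(mh, NULL, 0);
	return 0;
}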
+/* avoid a false positive for nodes_srcu: the lock was taken in
+ * dlm_midcomms_get_mhandle()
+ */
+#ifndef __CHECKER__
+void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh,
+				 const void *name, int namelen)
+{
+
+	switch (mh->node->version) {
+	case DLM_VERSION_3_1:
+		srcu_read_unlock(&nodes_srcu, mh->idx);
+
+		dlm_lowcomms_commit_msg(mh->msg);
+		dlm_lowcomms_put_msg(mh->msg);
+		/* mh is not part of an rcu list in this case */
+		dlm_free_mhandle(mh);
+		break;
+	case DLM_VERSION_3_2:
+		/* hold the rcu read lock here, because we are sending the
+		 * dlm message out; while we do that we could receive an
+		 * ack back which releases the mhandle, and we would get a
+		 * use after free.
+		 */
+		rcu_read_lock();
+		dlm_midcomms_commit_msg_3_2(mh, name, namelen);
+		srcu_read_unlock(&nodes_srcu, mh->idx);
+		rcu_read_unlock();
+		break;
+	default:
+		srcu_read_unlock(&nodes_srcu, mh->idx);
+		WARN_ON_ONCE(1);
+		break;
+	}
+}
+#endif
+
+int dlm_midcomms_start(void)
+{
+	return dlm_lowcomms_start();
+}
+
+void dlm_midcomms_stop(void)
+{
+	dlm_lowcomms_stop();
+}
+
+void dlm_midcomms_init(void)
+{
+	int i;
+
+	for (i = 0; i < CONN_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&node_hash[i]);
+
+	dlm_lowcomms_init();
+}
+
+static void midcomms_node_release(struct rcu_head *rcu)
+{
+	struct midcomms_node *node = container_of(rcu, struct midcomms_node, rcu);
+
+	WARN_ON_ONCE(atomic_read(&node->send_queue_cnt));
+	dlm_send_queue_flush(node);
+	kfree(node);
+}
+
+void dlm_midcomms_exit(void)
+{
+	struct midcomms_node *node;
+	int i, idx;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	for (i = 0; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+			dlm_delete_debug_comms_file(node->debugfs);
+
+			spin_lock(&nodes_lock);
+			hlist_del_rcu(&node->hlist);
+			spin_unlock(&nodes_lock);
+
+			call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
+		}
+	}
+	srcu_read_unlock(&nodes_srcu, idx);
+
+	dlm_lowcomms_exit();
+}
+
+static void dlm_act_fin_ack_rcv(struct midcomms_node *node)
+{
+	spin_lock_bh(&node->state_lock);
+	pr_debug("receive active fin ack from node %d with state %s\n",
+		 node->nodeid, dlm_state_str(node->state));
+
+	switch (node->state) {
+	case DLM_FIN_WAIT1:
+		node->state = DLM_FIN_WAIT2;
+		pr_debug("switch node %d to state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+		break;
+	case DLM_CLOSING:
+		midcomms_node_reset(node);
+		pr_debug("switch node %d to state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+		break;
+	case DLM_CLOSED:
+		/* not valid but somehow we got what we want */
+		wake_up(&node->shutdown_wait);
+		break;
+	default:
+		spin_unlock_bh(&node->state_lock);
+		log_print("%s: unexpected state: %d",
+			  __func__, node->state);
+		WARN_ON_ONCE(1);
+		return;
+	}
+	spin_unlock_bh(&node->state_lock);
+}
+
+void dlm_midcomms_add_member(int nodeid)
+{
+	struct midcomms_node *node;
+	int idx;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	node = nodeid2node(nodeid);
+	if (WARN_ON_ONCE(!node)) {
+		srcu_read_unlock(&nodes_srcu, idx);
+		return;
+	}
+
+	spin_lock_bh(&node->state_lock);
+	if (!node->users) {
+		pr_debug("receive add member from node %d with state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+		switch (node->state) {
+		case DLM_ESTABLISHED:
+			break;
+		case DLM_CLOSED:
+			node->state = DLM_ESTABLISHED;
+			pr_debug("switch node %d to state %s\n",
+				 node->nodeid, dlm_state_str(node->state));
+			break;
+		default:
+			/* we are in some invalid state: the passive
+			 * shutdown failed, so we try to reset and
+			 * hope it will go on.
+			 */
+			log_print("reset node %d because shutdown stuck",
+				  node->nodeid);
+
+			midcomms_node_reset(node);
+			node->state = DLM_ESTABLISHED;
+			break;
+		}
+	}
+
+	node->users++;
+	pr_debug("node %d users inc count %d\n", nodeid, node->users);
+	spin_unlock_bh(&node->state_lock);
+
+	srcu_read_unlock(&nodes_srcu, idx);
+}
+
+void dlm_midcomms_remove_member(int nodeid)
+{
+	struct midcomms_node *node;
+	int idx;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	node = nodeid2node(nodeid);
+	/* in case dlm_midcomms_close() already removed the node */
+	if (!node) {
+		srcu_read_unlock(&nodes_srcu, idx);
+		return;
+	}
+
+	spin_lock_bh(&node->state_lock);
+	/* case where dlm_midcomms_addr() created the node, but it was
+	 * never added because dlm_midcomms_close() removed the node
+	 */
+	if (!node->users) {
+		spin_unlock_bh(&node->state_lock);
+		srcu_read_unlock(&nodes_srcu, idx);
+		return;
+	}
+
+	node->users--;
+	pr_debug("node %d users dec count %d\n", nodeid, node->users);
+
+	/* the users count hitting zero means the other side is
+	 * running dlm_midcomms_stop(), so we meet it with a
+	 * clean disconnect.
+	 */
+	if (node->users == 0) {
+		pr_debug("receive remove member from node %d with state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+		switch (node->state) {
+		case DLM_ESTABLISHED:
+			break;
+		case DLM_CLOSE_WAIT:
+			/* passive shutdown DLM_LAST_ACK case 2 */
+			node->state = DLM_LAST_ACK;
+			pr_debug("switch node %d to state %s case 2\n",
+				 node->nodeid, dlm_state_str(node->state));
+			set_bit(DLM_NODE_FLAG_STOP_RX, &node->flags);
+			dlm_send_fin(node, dlm_pas_fin_ack_rcv);
+			break;
+		case DLM_LAST_ACK:
+			/* probably receive fin caught it, do nothing */
+			break;
+		case DLM_CLOSED:
+			/* already gone, do nothing */
+			break;
+		default:
+			log_print("%s: unexpected state: %d",
+				  __func__, node->state);
+			break;
+		}
+	}
+	spin_unlock_bh(&node->state_lock);
+
+	srcu_read_unlock(&nodes_srcu, idx);
+}
+
+void dlm_midcomms_version_wait(void)
+{
+	struct midcomms_node *node;
+	int i, idx, ret;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	for (i = 0; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+			ret = wait_event_timeout(node->shutdown_wait,
+						 node->version != DLM_VERSION_NOT_SET ||
+						 node->state == DLM_CLOSED ||
+						 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
+						 DLM_SHUTDOWN_TIMEOUT);
+			if (!ret || test_bit(DLM_NODE_FLAG_CLOSE, &node->flags))
+				pr_debug("version wait timed out for node %d with state %s\n",
+					 node->nodeid, dlm_state_str(node->state));
+		}
+	}
+	srcu_read_unlock(&nodes_srcu, idx);
+}
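The FIN/ACK handling is spread across dlm_midcomms_receive_buffer() (on DLM_FIN), dlm_act_fin_ack_rcv() and dlm_pas_fin_ack_rcv(). As a reading aid, the transitions can be condensed into two pure functions mirroring the state diagram at the top of this file; this is not kernel code, and locking, the FIN/ACK sends and the midcomms_node_reset() calls are intentionally left out:

enum demo_state {
	DEMO_ESTABLISHED, DEMO_FIN_WAIT1, DEMO_FIN_WAIT2,
	DEMO_CLOSE_WAIT, DEMO_LAST_ACK, DEMO_CLOSING, DEMO_CLOSED,
};

static enum demo_state demo_rcv_fin(enum demo_state s, int users)
{
	switch (s) {
	case DEMO_ESTABLISHED:	/* passive close; case 1 if node unused */
		return users ? DEMO_CLOSE_WAIT : DEMO_LAST_ACK;
	case DEMO_FIN_WAIT1:	/* simultaneous close */
		return DEMO_CLOSING;
	case DEMO_FIN_WAIT2:	/* our FIN was already acked */
		return DEMO_CLOSED;
	default:
		return s;
	}
}

static enum demo_state demo_rcv_fin_ack(enum demo_state s)
{
	switch (s) {
	case DEMO_FIN_WAIT1:	return DEMO_FIN_WAIT2;
	case DEMO_CLOSING:	return DEMO_CLOSED;
	case DEMO_LAST_ACK:	return DEMO_CLOSED;
	default:		return s;
	}
}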
+static void midcomms_shutdown(struct midcomms_node *node)
+{
+	int ret;
+
+	/* old protocol, we don't wait for pending operations */
+	switch (node->version) {
+	case DLM_VERSION_3_2:
+		break;
+	default:
+		return;
+	}
+
+	spin_lock_bh(&node->state_lock);
+	pr_debug("receive active shutdown for node %d with state %s\n",
+		 node->nodeid, dlm_state_str(node->state));
+	switch (node->state) {
+	case DLM_ESTABLISHED:
+		node->state = DLM_FIN_WAIT1;
+		pr_debug("switch node %d to state %s case 2\n",
+			 node->nodeid, dlm_state_str(node->state));
+		dlm_send_fin(node, dlm_act_fin_ack_rcv);
+		break;
+	case DLM_CLOSED:
+		/* we have what we want */
+		break;
+	default:
+		/* busy entering DLM_FIN_WAIT1; wait in shutdown_wait
+		 * until the passive side is done and we enter DLM_CLOSED.
+		 */
+		break;
+	}
+	spin_unlock_bh(&node->state_lock);
+
+	if (DLM_DEBUG_FENCE_TERMINATION)
+		msleep(5000);
+
+	/* wait for the other side's dlm + fin */
+	ret = wait_event_timeout(node->shutdown_wait,
+				 node->state == DLM_CLOSED ||
+				 test_bit(DLM_NODE_FLAG_CLOSE, &node->flags),
+				 DLM_SHUTDOWN_TIMEOUT);
+	if (!ret)
+		pr_debug("active shutdown timed out for node %d with state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+	else
+		pr_debug("active shutdown done for node %d with state %s\n",
+			 node->nodeid, dlm_state_str(node->state));
+}
+
+void dlm_midcomms_shutdown(void)
+{
+	struct midcomms_node *node;
+	int i, idx;
+
+	mutex_lock(&close_lock);
+	idx = srcu_read_lock(&nodes_srcu);
+	for (i = 0; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+			midcomms_shutdown(node);
+		}
+	}
+
+	dlm_lowcomms_shutdown();
+
+	for (i = 0; i < CONN_HASH_SIZE; i++) {
+		hlist_for_each_entry_rcu(node, &node_hash[i], hlist) {
+			midcomms_node_reset(node);
+		}
+	}
+	srcu_read_unlock(&nodes_srcu, idx);
+	mutex_unlock(&close_lock);
+}
+
+int dlm_midcomms_close(int nodeid)
+{
+	struct midcomms_node *node;
+	int idx, ret;
+
+	idx = srcu_read_lock(&nodes_srcu);
+	/* Abort pending close/remove operation */
+	node = nodeid2node(nodeid);
+	if (node) {
+		/* let shutdown waiters leave */
+		set_bit(DLM_NODE_FLAG_CLOSE, &node->flags);
+		wake_up(&node->shutdown_wait);
+	}
+	srcu_read_unlock(&nodes_srcu, idx);
+
+	synchronize_srcu(&nodes_srcu);
+
+	mutex_lock(&close_lock);
+	idx = srcu_read_lock(&nodes_srcu);
+	node = nodeid2node(nodeid);
+	if (!node) {
+		srcu_read_unlock(&nodes_srcu, idx);
+		mutex_unlock(&close_lock);
+		return dlm_lowcomms_close(nodeid);
+	}
+
+	ret = dlm_lowcomms_close(nodeid);
+	dlm_delete_debug_comms_file(node->debugfs);
+
+	spin_lock_bh(&nodes_lock);
+	hlist_del_rcu(&node->hlist);
+	spin_unlock_bh(&nodes_lock);
+	srcu_read_unlock(&nodes_srcu, idx);
+
+	/* wait until all readers have left before flushing the send queue */
+	synchronize_srcu(&nodes_srcu);
+
+	/* drop all pending dlm messages; this is fine because this
+	 * function gets called when the node is fenced
+	 */
+	dlm_send_queue_flush(node);
+
+	call_srcu(&nodes_srcu, &node->rcu, midcomms_node_release);
+	mutex_unlock(&close_lock);
+
+	return ret;
+}
+
+/* debug functionality to send a raw dlm msg from user space */
+struct dlm_rawmsg_data {
+	struct midcomms_node *node;
+	void *buf;
+};
+
+static void midcomms_new_rawmsg_cb(void *data)
+{
+	struct dlm_rawmsg_data *rd = data;
+	struct dlm_header *h = rd->buf;
+
+	switch (h->h_version) {
+	case cpu_to_le32(DLM_VERSION_3_1):
+		break;
+	default:
+		switch (h->h_cmd) {
+		case DLM_OPTS:
+			if (!h->u.h_seq)
+				h->u.h_seq = cpu_to_le32(atomic_fetch_inc(&rd->node->seq_send));
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+}
+
+int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf,
+			     int buflen)
+{
+	struct dlm_rawmsg_data rd;
+	struct dlm_msg *msg;
+	char *msgbuf;
+
+	rd.node = node;
+	rd.buf = buf;
-
+	msg = dlm_lowcomms_new_msg(node->nodeid, buflen, &msgbuf,
+				   midcomms_new_rawmsg_cb, &rd);
+	if (!msg)
+		return -ENOMEM;
-	return err ? 
err : ret; + memcpy(msgbuf, buf, buflen); + dlm_lowcomms_commit_msg(msg); + return 0; } diff --git a/fs/dlm/midcomms.h b/fs/dlm/midcomms.h index 95852a5f111d..7fad1d170bba 100644 --- a/fs/dlm/midcomms.h +++ b/fs/dlm/midcomms.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -14,8 +12,31 @@ #ifndef __MIDCOMMS_DOT_H__ #define __MIDCOMMS_DOT_H__ -int dlm_process_incoming_buffer(int nodeid, const void *base, unsigned offset, - unsigned len, unsigned limit); +struct midcomms_node; + +int dlm_validate_incoming_buffer(int nodeid, unsigned char *buf, int len); +int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen); +struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc); +void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name, + int namelen); +int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr); +void dlm_midcomms_version_wait(void); +int dlm_midcomms_close(int nodeid); +int dlm_midcomms_start(void); +void dlm_midcomms_stop(void); +void dlm_midcomms_init(void); +void dlm_midcomms_exit(void); +void dlm_midcomms_shutdown(void); +void dlm_midcomms_add_member(int nodeid); +void dlm_midcomms_remove_member(int nodeid); +void dlm_midcomms_unack_msg_resend(int nodeid); +const char *dlm_midcomms_state(struct midcomms_node *node); +unsigned long dlm_midcomms_flags(struct midcomms_node *node); +int dlm_midcomms_send_queue_cnt(struct midcomms_node *node); +uint32_t dlm_midcomms_version(struct midcomms_node *node); +int dlm_midcomms_rawmsg_send(struct midcomms_node *node, void *buf, + int buflen); +struct kmem_cache *dlm_midcomms_cache_create(void); #endif /* __MIDCOMMS_DOT_H__ */ diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c deleted file mode 100644 index 60a327863b11..000000000000 --- a/fs/dlm/netlink.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2007 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License v.2. 
- */ - -#include <net/genetlink.h> -#include <linux/dlm.h> -#include <linux/dlm_netlink.h> -#include <linux/gfp.h> - -#include "dlm_internal.h" - -static uint32_t dlm_nl_seqnum; -static uint32_t listener_nlportid; - -static struct genl_family family = { - .id = GENL_ID_GENERATE, - .name = DLM_GENL_NAME, - .version = DLM_GENL_VERSION, -}; - -static int prepare_data(u8 cmd, struct sk_buff **skbp, size_t size) -{ - struct sk_buff *skb; - void *data; - - skb = genlmsg_new(size, GFP_NOFS); - if (!skb) - return -ENOMEM; - - /* add the message headers */ - data = genlmsg_put(skb, 0, dlm_nl_seqnum++, &family, 0, cmd); - if (!data) { - nlmsg_free(skb); - return -EINVAL; - } - - *skbp = skb; - return 0; -} - -static struct dlm_lock_data *mk_data(struct sk_buff *skb) -{ - struct nlattr *ret; - - ret = nla_reserve(skb, DLM_TYPE_LOCK, sizeof(struct dlm_lock_data)); - if (!ret) - return NULL; - return nla_data(ret); -} - -static int send_data(struct sk_buff *skb) -{ - struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data); - void *data = genlmsg_data(genlhdr); - int rv; - - rv = genlmsg_end(skb, data); - if (rv < 0) { - nlmsg_free(skb); - return rv; - } - - return genlmsg_unicast(&init_net, skb, listener_nlportid); -} - -static int user_cmd(struct sk_buff *skb, struct genl_info *info) -{ - listener_nlportid = info->snd_portid; - printk("user_cmd nlpid %u\n", listener_nlportid); - return 0; -} - -static struct genl_ops dlm_nl_ops = { - .cmd = DLM_CMD_HELLO, - .doit = user_cmd, -}; - -int __init dlm_netlink_init(void) -{ - return genl_register_family_with_ops(&family, &dlm_nl_ops, 1); -} - -void dlm_netlink_exit(void) -{ - genl_unregister_family(&family); -} - -static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) -{ - struct dlm_rsb *r = lkb->lkb_resource; - - memset(data, 0, sizeof(struct dlm_lock_data)); - - data->version = DLM_LOCK_DATA_VERSION; - data->nodeid = lkb->lkb_nodeid; - data->ownpid = lkb->lkb_ownpid; - data->id = lkb->lkb_id; - data->remid = lkb->lkb_remid; - data->status = lkb->lkb_status; - data->grmode = lkb->lkb_grmode; - data->rqmode = lkb->lkb_rqmode; - if (lkb->lkb_ua) - data->xid = lkb->lkb_ua->xid; - if (r) { - data->lockspace_id = r->res_ls->ls_global_id; - data->resource_namelen = r->res_length; - memcpy(data->resource_name, r->res_name, r->res_length); - } -} - -void dlm_timeout_warn(struct dlm_lkb *lkb) -{ - struct sk_buff *uninitialized_var(send_skb); - struct dlm_lock_data *data; - size_t size; - int rv; - - size = nla_total_size(sizeof(struct dlm_lock_data)) + - nla_total_size(0); /* why this? */ - - rv = prepare_data(DLM_CMD_TIMEOUT, &send_skb, size); - if (rv < 0) - return; - - data = mk_data(send_skb); - if (!data) { - nlmsg_free(send_skb); - return; - } - - fill_data(data, lkb); - - send_data(send_skb); -} - diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index f704458ea5f5..9ca83ef70ed1 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -1,41 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License version 2. 
*/ #include <linux/fs.h> +#include <linux/filelock.h> #include <linux/miscdevice.h> #include <linux/poll.h> #include <linux/dlm.h> #include <linux/dlm_plock.h> #include <linux/slab.h> +#include <trace/events/dlm.h> + #include "dlm_internal.h" #include "lockspace.h" -static spinlock_t ops_lock; -static struct list_head send_list; -static struct list_head recv_list; -static wait_queue_head_t send_wq; -static wait_queue_head_t recv_wq; - -struct plock_op { - struct list_head list; - int done; - struct dlm_plock_info info; -}; +static DEFINE_SPINLOCK(ops_lock); +static LIST_HEAD(send_list); +static LIST_HEAD(recv_list); +static DECLARE_WAIT_QUEUE_HEAD(send_wq); +static DECLARE_WAIT_QUEUE_HEAD(recv_wq); -struct plock_xop { - struct plock_op xop; - void *callback; +struct plock_async_data { void *fl; void *file; struct file_lock flc; + int (*callback)(struct file_lock *fl, int result); }; +struct plock_op { + struct list_head list; + int done; + struct dlm_plock_info info; + /* if set indicates async handling */ + struct plock_async_data *data; +}; static inline void set_version(struct dlm_plock_info *info) { @@ -44,6 +44,27 @@ static inline void set_version(struct dlm_plock_info *info) info->version[2] = DLM_PLOCK_VERSION_PATCH; } +static struct plock_op *plock_lookup_waiter(const struct dlm_plock_info *info) +{ + struct plock_op *op = NULL, *iter; + + list_for_each_entry(iter, &recv_list, list) { + if (iter->info.fsid == info->fsid && + iter->info.number == info->number && + iter->info.owner == info->owner && + iter->info.pid == info->pid && + iter->info.start == info->start && + iter->info.end == info->end && + iter->info.ex == info->ex && + iter->info.wait) { + op = iter; + break; + } + } + + return op; +} + static int check_version(struct dlm_plock_info *info) { if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) || @@ -61,123 +82,152 @@ static int check_version(struct dlm_plock_info *info) return 0; } +static void dlm_release_plock_op(struct plock_op *op) +{ + kfree(op->data); + kfree(op); +} + static void send_op(struct plock_op *op) { set_version(&op->info); - INIT_LIST_HEAD(&op->list); spin_lock(&ops_lock); list_add_tail(&op->list, &send_list); spin_unlock(&ops_lock); wake_up(&send_wq); } -/* If a process was killed while waiting for the only plock on a file, - locks_remove_posix will not see any lock on the file so it won't - send an unlock-close to us to pass on to userspace to clean up the - abandoned waiter. So, we have to insert the unlock-close when the - lock call is interrupted. 
*/ - -static void do_unlock_close(struct dlm_ls *ls, u64 number, - struct file *file, struct file_lock *fl) +static int do_lock_cancel(const struct dlm_plock_info *orig_info) { struct plock_op *op; + int rv; op = kzalloc(sizeof(*op), GFP_NOFS); if (!op) - return; + return -ENOMEM; + + op->info = *orig_info; + op->info.optype = DLM_PLOCK_OP_CANCEL; + op->info.wait = 0; - op->info.optype = DLM_PLOCK_OP_UNLOCK; - op->info.pid = fl->fl_pid; - op->info.fsid = ls->ls_global_id; - op->info.number = number; - op->info.start = 0; - op->info.end = OFFSET_MAX; - if (fl->fl_lmops && fl->fl_lmops->lm_grant) - op->info.owner = (__u64) fl->fl_pid; - else - op->info.owner = (__u64)(long) fl->fl_owner; - - op->info.flags |= DLM_PLOCK_FL_CLOSE; send_op(op); + wait_event(recv_wq, (op->done != 0)); + + rv = op->info.rv; + + dlm_release_plock_op(op); + return rv; } int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl) { + struct plock_async_data *op_data; struct dlm_ls *ls; struct plock_op *op; - struct plock_xop *xop; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; - xop = kzalloc(sizeof(*xop), GFP_NOFS); - if (!xop) { + op = kzalloc(sizeof(*op), GFP_NOFS); + if (!op) { rv = -ENOMEM; goto out; } - op = &xop->xop; op->info.optype = DLM_PLOCK_OP_LOCK; - op->info.pid = fl->fl_pid; - op->info.ex = (fl->fl_type == F_WRLCK); - op->info.wait = IS_SETLKW(cmd); + op->info.pid = fl->c.flc_pid; + op->info.ex = lock_is_write(fl); + op->info.wait = !!(fl->c.flc_flags & FL_SLEEP); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; + op->info.owner = (__u64)(long) fl->c.flc_owner; + /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { - /* fl_owner is lockd which doesn't distinguish - processes on the nfs client */ - op->info.owner = (__u64) fl->fl_pid; - xop->callback = fl->fl_lmops->lm_grant; - locks_init_lock(&xop->flc); - locks_copy_lock(&xop->flc, fl); - xop->fl = fl; - xop->file = file; - } else { - op->info.owner = (__u64)(long) fl->fl_owner; - xop->callback = NULL; + op_data = kzalloc(sizeof(*op_data), GFP_NOFS); + if (!op_data) { + dlm_release_plock_op(op); + rv = -ENOMEM; + goto out; + } + + op_data->callback = fl->fl_lmops->lm_grant; + locks_init_lock(&op_data->flc); + locks_copy_lock(&op_data->flc, fl); + op_data->fl = fl; + op_data->file = file; + + op->data = op_data; + + send_op(op); + rv = FILE_LOCK_DEFERRED; + goto out; } send_op(op); - if (xop->callback == NULL) { - rv = wait_event_killable(recv_wq, (op->done != 0)); + if (op->info.wait) { + rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { - log_debug(ls, "dlm_posix_lock: wait killed %llx", - (unsigned long long)number); spin_lock(&ops_lock); - list_del(&op->list); + /* recheck under ops_lock if we got a done != 0, + * if so this interrupt case should be ignored + */ + if (op->done != 0) { + spin_unlock(&ops_lock); + goto do_lock_wait; + } spin_unlock(&ops_lock); - kfree(xop); - do_unlock_close(ls, number, file, fl); + + rv = do_lock_cancel(&op->info); + switch (rv) { + case 0: + /* waiter was deleted in user space, answer will never come + * remove original request. The original request must be + * on recv_list because the answer of do_lock_cancel() + * synchronized it. 
+ */ + spin_lock(&ops_lock); + list_del(&op->list); + spin_unlock(&ops_lock); + rv = -EINTR; + break; + case -ENOENT: + /* cancellation wasn't successful but op should be done */ + fallthrough; + default: + /* internal error doing cancel we need to wait */ + goto wait; + } + + log_debug(ls, "%s: wait interrupted %x %llx pid %d", + __func__, ls->ls_global_id, + (unsigned long long)number, op->info.pid); + dlm_release_plock_op(op); goto out; } } else { - rv = FILE_LOCK_DEFERRED; - goto out; +wait: + wait_event(recv_wq, (op->done != 0)); } - spin_lock(&ops_lock); - if (!list_empty(&op->list)) { - log_error(ls, "dlm_posix_lock: op on list %llx", - (unsigned long long)number); - list_del(&op->list); - } - spin_unlock(&ops_lock); +do_lock_wait: + + WARN_ON(!list_empty(&op->list)); rv = op->info.rv; if (!rv) { - if (posix_lock_file_wait(file, fl) < 0) + if (locks_lock_file_wait(file, fl) < 0) log_error(ls, "dlm_posix_lock: vfs lock error %llx", (unsigned long long)number); } - kfree(xop); + dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; @@ -187,34 +237,28 @@ EXPORT_SYMBOL_GPL(dlm_posix_lock); /* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { + struct plock_async_data *op_data = op->data; struct file *file; struct file_lock *fl; struct file_lock *flc; - int (*notify)(void *, void *, int) = NULL; - struct plock_xop *xop = (struct plock_xop *)op; + int (*notify)(struct file_lock *fl, int result) = NULL; int rv = 0; - spin_lock(&ops_lock); - if (!list_empty(&op->list)) { - log_print("dlm_plock_callback: op on list %llx", - (unsigned long long)op->info.number); - list_del(&op->list); - } - spin_unlock(&ops_lock); + WARN_ON(!list_empty(&op->list)); /* check if the following 2 are still valid or make a copy */ - file = xop->file; - flc = &xop->flc; - fl = xop->fl; - notify = xop->callback; + file = op_data->file; + flc = &op_data->flc; + fl = op_data->fl; + notify = op_data->callback; if (op->info.rv) { - notify(fl, NULL, op->info.rv); + notify(fl, op->info.rv); goto out; } /* got fs lock; bookkeep locally as well: */ - flc->fl_flags &= ~FL_SLEEP; + flc->c.flc_flags &= ~FL_SLEEP; if (posix_lock_file(file, flc, NULL)) { /* * This can only happen in the case of kmalloc() failure. 
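For context, here is what the async path completed by dlm_plock_callback() looks like from a filesystem's point of view: a request with fl_lmops->lm_grant set returns FILE_LOCK_DEFERRED immediately, and the verdict arrives later through lm_grant once userspace answers via dev_write(). A hypothetical sketch (the demo_* names are illustrations, not part of the patch, and the wiring of demo_grant into fl_lmops is assumed):

#include <linux/filelock.h>
#include <linux/dlm_plock.h>

static int demo_grant(struct file_lock *fl, int result)
{
	/* result == 0: the cluster granted the lock; otherwise an errno
	 * forwarded from dlm_controld through dlm_plock_callback()
	 */
	return 0;
}

static int demo_request(dlm_lockspace_t *ls, u64 number, struct file *file,
			struct file_lock *fl)
{
	int rv = dlm_posix_lock(ls, number, file, F_SETLK, fl);

	if (rv == FILE_LOCK_DEFERRED)
		return 0;	/* demo_grant() will be called later */
	return rv;		/* synchronous result */
}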
@@ -228,16 +272,16 @@ static int dlm_plock_callback(struct plock_op *op)
 			  (unsigned long long)op->info.number, file, fl);
 	}
 
-	rv = notify(fl, NULL, 0);
+	rv = notify(fl, 0);
 	if (rv) {
 		/* XXX: We need to cancel the fs lock here: */
-		log_print("dlm_plock_callback: lock granted after lock request "
-			  "failed; dangling lock!\n");
+		log_print("%s: lock granted after lock request failed; dangling lock!",
+			  __func__);
 		goto out;
 	}
 
 out:
-	kfree(xop);
+	dlm_release_plock_op(op);
 	return rv;
 }
 
@@ -247,7 +291,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 	struct dlm_ls *ls;
 	struct plock_op *op;
 	int rv;
-	unsigned char fl_flags = fl->fl_flags;
+	unsigned char saved_flags = fl->c.flc_flags;
 
 	ls = dlm_find_lockspace_local(lockspace);
 	if (!ls)
@@ -260,9 +304,9 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 	}
 
 	/* cause the vfs unlock to return ENOENT if lock is not found */
-	fl->fl_flags |= FL_EXISTS;
+	fl->c.flc_flags |= FL_EXISTS;
 
-	rv = posix_lock_file_wait(file, fl);
+	rv = locks_lock_file_wait(file, fl);
 	if (rv == -ENOENT) {
 		rv = 0;
 		goto out_free;
@@ -273,17 +317,14 @@
 	op->info.optype = DLM_PLOCK_OP_UNLOCK;
-	op->info.pid = fl->fl_pid;
+	op->info.pid = fl->c.flc_pid;
 	op->info.fsid = ls->ls_global_id;
 	op->info.number = number;
 	op->info.start = fl->fl_start;
 	op->info.end = fl->fl_end;
-	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
-		op->info.owner = (__u64) fl->fl_pid;
-	else
-		op->info.owner = (__u64)(long) fl->fl_owner;
+	op->info.owner = (__u64)(long) fl->c.flc_owner;
 
-	if (fl->fl_flags & FL_CLOSE) {
+	if (fl->c.flc_flags & FL_CLOSE) {
 		op->info.flags |= DLM_PLOCK_FL_CLOSE;
 		send_op(op);
 		rv = 0;
@@ -293,13 +334,7 @@
 	send_op(op);
 	wait_event(recv_wq, (op->done != 0));
 
-	spin_lock(&ops_lock);
-	if (!list_empty(&op->list)) {
-		log_error(ls, "dlm_posix_unlock: op on list %llx",
-			  (unsigned long long)number);
-		list_del(&op->list);
-	}
-	spin_unlock(&ops_lock);
+	WARN_ON(!list_empty(&op->list));
 
 	rv = op->info.rv;
 
@@ -307,14 +342,83 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
 		rv = 0;
 
 out_free:
-	kfree(op);
+	dlm_release_plock_op(op);
 out:
 	dlm_put_lockspace(ls);
-	fl->fl_flags = fl_flags;
+	fl->c.flc_flags = saved_flags;
 	return rv;
 }
 EXPORT_SYMBOL_GPL(dlm_posix_unlock);
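The cancel path added below maps the do_lock_cancel() result onto three caller-visible outcomes. A sketch of what a caller might see, assuming the request was issued async as in the earlier sketch; demo_cancel() is hypothetical:

static int demo_cancel(dlm_lockspace_t *ls, u64 number, struct file *file,
		       struct file_lock *fl)
{
	int rv = dlm_posix_cancel(ls, number, file, fl);

	switch (rv) {
	case -EINTR:
		/* cancelled in time; lm_grant already ran with -EINTR */
		return 0;
	case 0:
		/* too late, the lock was granted and has been unlocked */
		return 0;
	default:
		/* e.g. -ENOLCK or -EOPNOTSUPP: nothing was cancelled */
		return rv;
	}
}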
+ */ + if (WARN_ON_ONCE(!fl->fl_lmops || !fl->fl_lmops->lm_grant)) + return -EOPNOTSUPP; + + ls = dlm_find_lockspace_local(lockspace); + if (!ls) + return -EINVAL; + + memset(&info, 0, sizeof(info)); + info.pid = fl->c.flc_pid; + info.ex = lock_is_write(fl); + info.fsid = ls->ls_global_id; + dlm_put_lockspace(ls); + info.number = number; + info.start = fl->fl_start; + info.end = fl->fl_end; + info.owner = (__u64)(long) fl->c.flc_owner; + + rv = do_lock_cancel(&info); + switch (rv) { + case 0: + spin_lock(&ops_lock); + /* lock request to cancel must be on recv_list because + * do_lock_cancel() synchronizes it. + */ + op = plock_lookup_waiter(&info); + if (WARN_ON_ONCE(!op)) { + spin_unlock(&ops_lock); + rv = -ENOLCK; + break; + } + + list_del(&op->list); + spin_unlock(&ops_lock); + WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); + op->data->callback(op->data->fl, -EINTR); + dlm_release_plock_op(op); + rv = -EINTR; + break; + case -ENOENT: + /* if cancel wasn't successful we probably were too late + * or it was a non-blocking lock request, so just unlock it. + */ + rv = dlm_posix_unlock(lockspace, number, file, fl); + break; + default: + break; + } + + return rv; +} +EXPORT_SYMBOL_GPL(dlm_posix_cancel); + int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, struct file_lock *fl) { @@ -333,47 +437,40 @@ } op->info.optype = DLM_PLOCK_OP_GET; - op->info.pid = fl->fl_pid; - op->info.ex = (fl->fl_type == F_WRLCK); + op->info.pid = fl->c.flc_pid; + op->info.ex = lock_is_write(fl); op->info.fsid = ls->ls_global_id; op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; - if (fl->fl_lmops && fl->fl_lmops->lm_grant) - op->info.owner = (__u64) fl->fl_pid; - else - op->info.owner = (__u64)(long) fl->fl_owner; + op->info.owner = (__u64)(long) fl->c.flc_owner; send_op(op); wait_event(recv_wq, (op->done != 0)); - spin_lock(&ops_lock); - if (!list_empty(&op->list)) { - log_error(ls, "dlm_posix_get: op on list %llx", - (unsigned long long)number); - list_del(&op->list); - } - spin_unlock(&ops_lock); + WARN_ON(!list_empty(&op->list)); /* info.rv from userspace is 1 for conflict, 0 for no-conflict, -ENOENT if there are no locks on the file */ rv = op->info.rv; - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; if (rv == -ENOENT) rv = 0; else if (rv > 0) { locks_init_lock(fl); - fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK; - fl->fl_flags = FL_POSIX; - fl->fl_pid = op->info.pid; + fl->c.flc_type = (op->info.ex) ? 
F_WRLCK : F_RDLCK; + fl->c.flc_flags = FL_POSIX; + fl->c.flc_pid = op->info.pid; + if (op->info.nodeid != dlm_our_nodeid()) + fl->c.flc_pid = -fl->c.flc_pid; fl->fl_start = op->info.start; fl->fl_end = op->info.end; rv = 0; } - kfree(op); + dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; @@ -392,11 +489,11 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, spin_lock(&ops_lock); if (!list_empty(&send_list)) { - op = list_entry(send_list.next, struct plock_op, list); + op = list_first_entry(&send_list, struct plock_op, list); if (op->info.flags & DLM_PLOCK_FL_CLOSE) list_del(&op->list); else - list_move(&op->list, &recv_list); + list_move_tail(&op->list, &recv_list); memcpy(&info, &op->info, sizeof(info)); } spin_unlock(&ops_lock); @@ -404,12 +501,14 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, if (!op) return -EAGAIN; + trace_dlm_plock_read(&info); + /* there is no need to get a reply from userspace for unlocks that were generated by the vfs cleaning up for a close (the process did not make an unlock call). */ if (op->info.flags & DLM_PLOCK_FL_CLOSE) - kfree(op); + dlm_release_plock_op(op); if (copy_to_user(u, &info, sizeof(info))) return -EFAULT; @@ -421,9 +520,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, static ssize_t dev_write(struct file *file, const char __user *u, size_t count, loff_t *ppos) { + struct plock_op *op = NULL, *iter; struct dlm_plock_info info; - struct plock_op *op; - int found = 0, do_callback = 0; + int do_callback = 0; if (count != sizeof(info)) return -EINVAL; @@ -431,47 +530,68 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, if (copy_from_user(&info, u, sizeof(info))) return -EFAULT; + trace_dlm_plock_write(&info); + if (check_version(&info)) return -EINVAL; + /* + * The results for waiting ops (SETLKW) can be returned in any + * order, so match all fields to find the op. The results for + * non-waiting ops are returned in the order that they were sent + * to userspace, so match the result with the first non-waiting op. + */ spin_lock(&ops_lock); - list_for_each_entry(op, &recv_list, list) { - if (op->info.fsid == info.fsid && - op->info.number == info.number && - op->info.owner == info.owner) { - struct plock_xop *xop = (struct plock_xop *)op; - list_del_init(&op->list); - memcpy(&op->info, &info, sizeof(info)); - if (xop->callback) - do_callback = 1; - else - op->done = 1; - found = 1; - break; + if (info.wait) { + op = plock_lookup_waiter(&info); + } else { + list_for_each_entry(iter, &recv_list, list) { + if (!iter->info.wait && + iter->info.fsid == info.fsid) { + op = iter; + break; + } } } + + if (op) { + /* Sanity check that op and info match. 
*/ + if (info.wait) + WARN_ON(op->info.optype != DLM_PLOCK_OP_LOCK); + else + WARN_ON(op->info.number != info.number || + op->info.owner != info.owner || + op->info.optype != info.optype); + + list_del_init(&op->list); + memcpy(&op->info, &info, sizeof(info)); + if (op->data) + do_callback = 1; + else + op->done = 1; + } spin_unlock(&ops_lock); - if (found) { + if (op) { if (do_callback) dlm_plock_callback(op); else wake_up(&recv_wq); } else - log_print("dev_write no op %x %llx", info.fsid, - (unsigned long long)info.number); + pr_debug("%s: no op %x %llx", __func__, + info.fsid, (unsigned long long)info.number); return count; } -static unsigned int dev_poll(struct file *file, poll_table *wait) +static __poll_t dev_poll(struct file *file, poll_table *wait) { - unsigned int mask = 0; + __poll_t mask = 0; poll_wait(file, &send_wq, wait); spin_lock(&ops_lock); if (!list_empty(&send_list)) - mask = POLLIN | POLLRDNORM; + mask = EPOLLIN | EPOLLRDNORM; spin_unlock(&ops_lock); return mask; @@ -495,12 +615,6 @@ int dlm_plock_init(void) { int rv; - spin_lock_init(&ops_lock); - INIT_LIST_HEAD(&send_list); - INIT_LIST_HEAD(&recv_list); - init_waitqueue_head(&send_wq); - init_waitqueue_head(&recv_wq); - rv = misc_register(&plock_dev_misc); if (rv) log_print("dlm_plock_init: misc_register failed %d", rv); @@ -509,7 +623,8 @@ int dlm_plock_init(void) void dlm_plock_exit(void) { - if (misc_deregister(&plock_dev_misc) < 0) - log_print("dlm_plock_exit: misc_deregister failed"); + misc_deregister(&plock_dev_misc); + WARN_ON(!list_empty(&send_list)); + WARN_ON(!list_empty(&recv_list)); } diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c index 9d61947d473a..be1a71a6303a 100644 --- a/fs/dlm/rcom.c +++ b/fs/dlm/rcom.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
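
A note on the matching logic that dev_write() above relies on: results for waiting ops (SETLKW) can come back in any order, so they are matched on all identifying fields (what plock_lookup_waiter() does), while results for non-waiting ops return in FIFO order and only need to match the first queued non-waiting op of the same lockspace. A sketch of those two rules, with demo_match() as a hypothetical helper over the recv_list shown above:

static struct plock_op *demo_match(const struct dlm_plock_info *info)
{
        struct plock_op *iter;

        list_for_each_entry(iter, &recv_list, list) {
                if (info->wait) {
                        /* SETLKW replies may be reordered: match exactly */
                        if (iter->info.fsid == info->fsid &&
                            iter->info.number == info->number &&
                            iter->info.owner == info->owner)
                                return iter;
                } else if (!iter->info.wait &&
                           iter->info.fsid == info->fsid) {
                        /* non-waiting replies arrive in FIFO order */
                        return iter;
                }
        }
        return NULL;
}
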
** ******************************************************************************* ******************************************************************************/ @@ -29,46 +27,75 @@ static int rcom_response(struct dlm_ls *ls) return test_bit(LSFL_RCOM_READY, &ls->ls_flags); } -static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, - struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret) +static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, + struct dlm_rcom **rc_ret, char *mb, int mb_len, + uint64_t seq) { struct dlm_rcom *rc; + + rc = (struct dlm_rcom *) mb; + + rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); + rc->rc_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id); + rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); + rc->rc_header.h_length = cpu_to_le16(mb_len); + rc->rc_header.h_cmd = DLM_RCOM; + + rc->rc_type = cpu_to_le32(type); + rc->rc_seq = cpu_to_le64(seq); + + *rc_ret = rc; +} + +static int create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len, + struct dlm_rcom **rc_ret, struct dlm_mhandle **mh_ret, + uint64_t seq) +{ + int mb_len = sizeof(struct dlm_rcom) + len; struct dlm_mhandle *mh; char *mb; - int mb_len = sizeof(struct dlm_rcom) + len; - mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb); + mh = dlm_midcomms_get_mhandle(to_nodeid, mb_len, &mb); if (!mh) { - log_print("create_rcom to %d type %d len %d ENOBUFS", - to_nodeid, type, len); + log_print("%s to %d type %d len %d ENOBUFS", + __func__, to_nodeid, type, len); return -ENOBUFS; } - memset(mb, 0, mb_len); - rc = (struct dlm_rcom *) mb; - - rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR); - rc->rc_header.h_lockspace = ls->ls_global_id; - rc->rc_header.h_nodeid = dlm_our_nodeid(); - rc->rc_header.h_length = mb_len; - rc->rc_header.h_cmd = DLM_RCOM; + _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq); + *mh_ret = mh; + return 0; +} - rc->rc_type = type; +static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type, + int len, struct dlm_rcom **rc_ret, + struct dlm_msg **msg_ret, uint64_t seq) +{ + int mb_len = sizeof(struct dlm_rcom) + len; + struct dlm_msg *msg; + char *mb; - spin_lock(&ls->ls_recover_lock); - rc->rc_seq = ls->ls_recover_seq; - spin_unlock(&ls->ls_recover_lock); + msg = dlm_lowcomms_new_msg(to_nodeid, mb_len, &mb, NULL, NULL); + if (!msg) { + log_print("create_rcom to %d type %d len %d ENOBUFS", + to_nodeid, type, len); + return -ENOBUFS; + } - *mh_ret = mh; - *rc_ret = rc; + _create_rcom(ls, to_nodeid, type, len, rc_ret, mb, mb_len, seq); + *msg_ret = msg; return 0; } -static void send_rcom(struct dlm_ls *ls, struct dlm_mhandle *mh, - struct dlm_rcom *rc) +static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc) +{ + dlm_midcomms_commit_mhandle(mh, NULL, 0); +} + +static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc) { - dlm_rcom_out(rc); - dlm_lowcomms_commit_buffer(mh); + dlm_lowcomms_commit_msg(msg); + dlm_lowcomms_put_msg(msg); } static void set_rcom_status(struct dlm_ls *ls, struct rcom_status *rs, @@ -96,10 +123,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) { struct rcom_config *rf = (struct rcom_config *) rc->rc_buf; - if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) { + if ((le32_to_cpu(rc->rc_header.h_version) & 0xFFFF0000) != DLM_HEADER_MAJOR) { log_error(ls, "version mismatch: %x nodeid %d: %x", DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid, - 
rc->rc_header.h_version); + le32_to_cpu(rc->rc_header.h_version)); return -EPROTO; } @@ -114,20 +141,20 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) return 0; } -static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq) +static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq) { - spin_lock(&ls->ls_rcom_spin); - *new_seq = ++ls->ls_rcom_seq; + spin_lock_bh(&ls->ls_rcom_spin); + *new_seq = cpu_to_le64(++ls->ls_rcom_seq); set_bit(LSFL_RCOM_WAIT, &ls->ls_flags); - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } static void disallow_sync_reply(struct dlm_ls *ls) { - spin_lock(&ls->ls_rcom_spin); + spin_lock_bh(&ls->ls_rcom_spin); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); clear_bit(LSFL_RCOM_READY, &ls->ls_flags); - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } /* @@ -141,40 +168,45 @@ static void disallow_sync_reply(struct dlm_ls *ls) * node's rcom_config. */ -int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags) +int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags, + uint64_t seq) { struct dlm_rcom *rc; - struct dlm_mhandle *mh; + struct dlm_msg *msg; int error = 0; ls->ls_recover_nodeid = nodeid; if (nodeid == dlm_our_nodeid()) { rc = ls->ls_recover_buf; - rc->rc_result = dlm_recover_status(ls); + rc->rc_result = cpu_to_le32(dlm_recover_status(ls)); goto out; } - error = create_rcom(ls, nodeid, DLM_RCOM_STATUS, - sizeof(struct rcom_status), &rc, &mh); +retry: + error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS, + sizeof(struct rcom_status), &rc, &msg, + seq); if (error) goto out; set_rcom_status(ls, (struct rcom_status *)rc->rc_buf, status_flags); allow_sync_reply(ls, &rc->rc_id); - memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size); + memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE); - send_rcom(ls, mh, rc); + send_rcom_stateless(msg, rc); error = dlm_wait_function(ls, &rcom_response); disallow_sync_reply(ls); + if (error == -ETIMEDOUT) + goto retry; if (error) goto out; rc = ls->ls_recover_buf; - if (rc->rc_result == -ESRCH) { + if (rc->rc_result == cpu_to_le32(-ESRCH)) { /* we pretend the remote lockspace exists with 0 status */ log_debug(ls, "remote node %d not ready", nodeid); rc->rc_result = 0; @@ -188,14 +220,16 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags) return error; } -static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_rcom_status(struct dlm_ls *ls, + const struct dlm_rcom *rc_in, + uint64_t seq) { struct dlm_rcom *rc; - struct dlm_mhandle *mh; struct rcom_status *rs; uint32_t status; - int nodeid = rc_in->rc_header.h_nodeid; + int nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); int len = sizeof(struct rcom_config); + struct dlm_msg *msg; int num_slots = 0; int error; @@ -206,35 +240,35 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in) rs = (struct rcom_status *)rc_in->rc_buf; - if (!(rs->rs_flags & DLM_RSF_NEED_SLOTS)) { + if (!(le32_to_cpu(rs->rs_flags) & DLM_RSF_NEED_SLOTS)) { status = dlm_recover_status(ls); goto do_create; } - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; num_slots = ls->ls_num_slots; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); len += num_slots * sizeof(struct rcom_slot); do_create: - error = create_rcom(ls, nodeid, DLM_RCOM_STATUS_REPLY, - len, &rc, &mh); + error = create_rcom_stateless(ls, nodeid, 
DLM_RCOM_STATUS_REPLY, + len, &rc, &msg, seq); if (error) return; rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; - rc->rc_result = status; + rc->rc_result = cpu_to_le32(status); set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots); if (!num_slots) goto do_send; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); if (ls->ls_num_slots != num_slots) { - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); log_debug(ls, "receive_rcom_status num_slots %d to %d", num_slots, ls->ls_num_slots); rc->rc_result = 0; @@ -243,66 +277,76 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in) } dlm_slots_copy_out(ls, rc); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); do_send: - send_rcom(ls, mh, rc); + send_rcom_stateless(msg, rc); } -static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_sync_reply(struct dlm_ls *ls, const struct dlm_rcom *rc_in) { - spin_lock(&ls->ls_rcom_spin); + spin_lock_bh(&ls->ls_rcom_spin); if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) || - rc_in->rc_id != ls->ls_rcom_seq) { + le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) { log_debug(ls, "reject reply %d from %d seq %llx expect %llx", - rc_in->rc_type, rc_in->rc_header.h_nodeid, - (unsigned long long)rc_in->rc_id, + le32_to_cpu(rc_in->rc_type), + le32_to_cpu(rc_in->rc_header.h_nodeid), + (unsigned long long)le64_to_cpu(rc_in->rc_id), (unsigned long long)ls->ls_rcom_seq); goto out; } - memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length); + memcpy(ls->ls_recover_buf, rc_in, + le16_to_cpu(rc_in->rc_header.h_length)); set_bit(LSFL_RCOM_READY, &ls->ls_flags); clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags); wake_up(&ls->ls_wait_general); out: - spin_unlock(&ls->ls_rcom_spin); + spin_unlock_bh(&ls->ls_rcom_spin); } -int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, int last_len) +int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, + int last_len, uint64_t seq) { - struct dlm_rcom *rc; struct dlm_mhandle *mh; + struct dlm_rcom *rc; int error = 0; ls->ls_recover_nodeid = nodeid; - error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, &rc, &mh); +retry: + error = create_rcom(ls, nodeid, DLM_RCOM_NAMES, last_len, + &rc, &mh, seq); if (error) goto out; memcpy(rc->rc_buf, last_name, last_len); allow_sync_reply(ls, &rc->rc_id); - memset(ls->ls_recover_buf, 0, dlm_config.ci_buffer_size); + memset(ls->ls_recover_buf, 0, DLM_MAX_SOCKET_BUFSIZE); - send_rcom(ls, mh, rc); + send_rcom(mh, rc); error = dlm_wait_function(ls, &rcom_response); disallow_sync_reply(ls); + if (error == -ETIMEDOUT) + goto retry; out: return error; } -static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_rcom_names(struct dlm_ls *ls, const struct dlm_rcom *rc_in, + uint64_t seq) { - struct dlm_rcom *rc; struct dlm_mhandle *mh; + struct dlm_rcom *rc; int error, inlen, outlen, nodeid; - nodeid = rc_in->rc_header.h_nodeid; - inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom); - outlen = dlm_config.ci_buffer_size - sizeof(struct dlm_rcom); + nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); + inlen = le16_to_cpu(rc_in->rc_header.h_length) - + sizeof(struct dlm_rcom); + outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom); - error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, &rc, &mh); + error = create_rcom(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen, + &rc, &mh, seq); if (error) return; rc->rc_id = rc_in->rc_id; @@ -310,10 
+354,10 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in) dlm_copy_master_names(ls, rc_in->rc_buf, inlen, rc->rc_buf, outlen, nodeid); - send_rcom(ls, mh, rc); + send_rcom(mh, rc); } -int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid) +int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid, uint64_t seq) { struct dlm_rcom *rc; struct dlm_mhandle *mh; @@ -321,65 +365,51 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid) int error; error = create_rcom(ls, dir_nodeid, DLM_RCOM_LOOKUP, r->res_length, - &rc, &mh); - if (error) - goto out; - memcpy(rc->rc_buf, r->res_name, r->res_length); - rc->rc_id = (unsigned long) r->res_id; - - send_rcom(ls, mh, rc); - out: - return error; -} - -int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid) -{ - struct dlm_rcom *rc; - struct dlm_mhandle *mh; - struct dlm_ls *ls = r->res_ls; - int error; - - error = create_rcom(ls, to_nodeid, DLM_RCOM_LOOKUP, r->res_length, - &rc, &mh); + &rc, &mh, seq); if (error) goto out; memcpy(rc->rc_buf, r->res_name, r->res_length); - rc->rc_id = 0xFFFFFFFF; + rc->rc_id = cpu_to_le64(r->res_id); - send_rcom(ls, mh, rc); + send_rcom(mh, rc); out: return error; } -static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_rcom_lookup(struct dlm_ls *ls, + const struct dlm_rcom *rc_in, uint64_t seq) { struct dlm_rcom *rc; struct dlm_mhandle *mh; - int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid; - int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom); - - error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh); - if (error) - return; + int error, ret_nodeid, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); + int len = le16_to_cpu(rc_in->rc_header.h_length) - + sizeof(struct dlm_rcom); - if (rc_in->rc_id == 0xFFFFFFFF) { + /* Old code would send this special id to trigger a debug dump. 
*/ + if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) { log_error(ls, "receive_rcom_lookup dump from %d", nodeid); dlm_dump_rsb_name(ls, rc_in->rc_buf, len); return; } + error = create_rcom(ls, nodeid, DLM_RCOM_LOOKUP_REPLY, 0, &rc, &mh, + seq); + if (error) + return; + error = dlm_master_lookup(ls, nodeid, rc_in->rc_buf, len, DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL); if (error) ret_nodeid = error; - rc->rc_result = ret_nodeid; + rc->rc_result = cpu_to_le32(ret_nodeid); rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; - send_rcom(ls, mh, rc); + send_rcom(mh, rc); } -static void receive_rcom_lookup_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_rcom_lookup_reply(struct dlm_ls *ls, + const struct dlm_rcom *rc_in) { dlm_recover_master_reply(ls, rc_in); } @@ -392,7 +422,7 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, rl->rl_ownpid = cpu_to_le32(lkb->lkb_ownpid); rl->rl_lkid = cpu_to_le32(lkb->lkb_id); rl->rl_exflags = cpu_to_le32(lkb->lkb_exflags); - rl->rl_flags = cpu_to_le32(lkb->lkb_flags); + rl->rl_flags = cpu_to_le32(dlm_dflags_val(lkb)); rl->rl_lvbseq = cpu_to_le32(lkb->lkb_lvbseq); rl->rl_rqmode = lkb->lkb_rqmode; rl->rl_grmode = lkb->lkb_grmode; @@ -414,7 +444,7 @@ static void pack_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, memcpy(rl->rl_lvb, lkb->lkb_lvbptr, r->res_ls->ls_lvblen); } -int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) +int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, uint64_t seq) { struct dlm_ls *ls = r->res_ls; struct dlm_rcom *rc; @@ -425,47 +455,53 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb) if (lkb->lkb_lvbptr) len += ls->ls_lvblen; - error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh); + error = create_rcom(ls, r->res_nodeid, DLM_RCOM_LOCK, len, &rc, &mh, + seq); if (error) goto out; rl = (struct rcom_lock *) rc->rc_buf; pack_rcom_lock(r, lkb, rl); - rc->rc_id = (unsigned long) r; + rc->rc_id = cpu_to_le64((uintptr_t)r); - send_rcom(ls, mh, rc); + send_rcom(mh, rc); out: return error; } /* needs at least dlm_rcom + rcom_lock */ -static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in) +static void receive_rcom_lock(struct dlm_ls *ls, const struct dlm_rcom *rc_in, + uint64_t seq) { + __le32 rl_remid, rl_result; + struct rcom_lock *rl; struct dlm_rcom *rc; struct dlm_mhandle *mh; - int error, nodeid = rc_in->rc_header.h_nodeid; + int error, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid); - dlm_recover_master_copy(ls, rc_in); + dlm_recover_master_copy(ls, rc_in, &rl_remid, &rl_result); error = create_rcom(ls, nodeid, DLM_RCOM_LOCK_REPLY, - sizeof(struct rcom_lock), &rc, &mh); + sizeof(struct rcom_lock), &rc, &mh, seq); if (error) return; - /* We send back the same rcom_lock struct we received, but - dlm_recover_master_copy() has filled in rl_remid and rl_result */ - memcpy(rc->rc_buf, rc_in->rc_buf, sizeof(struct rcom_lock)); + rl = (struct rcom_lock *)rc->rc_buf; + /* set rl_remid and rl_result from dlm_recover_master_copy() */ + rl->rl_remid = rl_remid; + rl->rl_result = rl_result; + rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; - send_rcom(ls, mh, rc); + send_rcom(mh, rc); } /* If the lockspace doesn't exist then still send a status message back; it's possible that it just doesn't have its global_id yet. 
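
The retry loops added to dlm_rcom_status() and dlm_rcom_names() earlier in this file exist because status and names messages now travel the stateless lowcomms path, which midcomms does not retransmit; a lost message therefore surfaces as -ETIMEDOUT from dlm_wait_function() and is simply rebuilt and resent. The shape of that loop, condensed into a hypothetical demo_status_request() wrapper around the functions shown above:

static int demo_status_request(struct dlm_ls *ls, int nodeid, uint64_t seq)
{
        struct dlm_rcom *rc;
        struct dlm_msg *msg;
        int error;

retry:
        error = create_rcom_stateless(ls, nodeid, DLM_RCOM_STATUS,
                                      sizeof(struct rcom_status),
                                      &rc, &msg, seq);
        if (error)
                return error;

        allow_sync_reply(ls, &rc->rc_id);       /* arm the one-shot reply slot */
        send_rcom_stateless(msg, rc);

        error = dlm_wait_function(ls, &rcom_response);
        disallow_sync_reply(ls);
        if (error == -ETIMEDOUT)
                goto retry;                     /* message lost, resend */
        return error;
}
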
*/ -int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) +int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in) { struct dlm_rcom *rc; struct rcom_config *rf; @@ -473,29 +509,27 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) char *mb; int mb_len = sizeof(struct dlm_rcom) + sizeof(struct rcom_config); - mh = dlm_lowcomms_get_buffer(nodeid, mb_len, GFP_NOFS, &mb); + mh = dlm_midcomms_get_mhandle(nodeid, mb_len, &mb); if (!mh) return -ENOBUFS; - memset(mb, 0, mb_len); rc = (struct dlm_rcom *) mb; - rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR); - rc->rc_header.h_lockspace = rc_in->rc_header.h_lockspace; - rc->rc_header.h_nodeid = dlm_our_nodeid(); - rc->rc_header.h_length = mb_len; + rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR); + rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace; + rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid()); + rc->rc_header.h_length = cpu_to_le16(mb_len); rc->rc_header.h_cmd = DLM_RCOM; - rc->rc_type = DLM_RCOM_STATUS_REPLY; + rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY); rc->rc_id = rc_in->rc_id; rc->rc_seq_reply = rc_in->rc_seq; - rc->rc_result = -ESRCH; + rc->rc_result = cpu_to_le32(-ESRCH); rf = (struct rcom_config *) rc->rc_buf; rf->rf_lvblen = cpu_to_le32(~0U); - dlm_rcom_out(rc); - dlm_lowcomms_commit_buffer(mh); + dlm_midcomms_commit_mhandle(mh, NULL, 0); return 0; } @@ -545,7 +579,7 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in) /* Called by dlm_recv; corresponds to dlm_receive_message() but special recovery-only comms are sent through here. */ -void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) +void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, int nodeid) { int lock_size = sizeof(struct dlm_rcom) + sizeof(struct rcom_lock); int stop, reply = 0, names = 0, lookup = 0, lock = 0; @@ -553,42 +587,42 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) uint64_t seq; switch (rc->rc_type) { - case DLM_RCOM_STATUS_REPLY: + case cpu_to_le32(DLM_RCOM_STATUS_REPLY): reply = 1; break; - case DLM_RCOM_NAMES: + case cpu_to_le32(DLM_RCOM_NAMES): names = 1; break; - case DLM_RCOM_NAMES_REPLY: + case cpu_to_le32(DLM_RCOM_NAMES_REPLY): names = 1; reply = 1; break; - case DLM_RCOM_LOOKUP: + case cpu_to_le32(DLM_RCOM_LOOKUP): lookup = 1; break; - case DLM_RCOM_LOOKUP_REPLY: + case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): lookup = 1; reply = 1; break; - case DLM_RCOM_LOCK: + case cpu_to_le32(DLM_RCOM_LOCK): lock = 1; break; - case DLM_RCOM_LOCK_REPLY: + case cpu_to_le32(DLM_RCOM_LOCK_REPLY): lock = 1; reply = 1; break; - }; + } - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; - stop = test_bit(LSFL_RECOVER_STOP, &ls->ls_flags); + stop = dlm_recovery_stopped(ls); seq = ls->ls_recover_seq; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); - if (stop && (rc->rc_type != DLM_RCOM_STATUS)) + if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS))) goto ignore; - if (reply && (rc->rc_seq_reply != seq)) + if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq)) goto ignore; if (!(status & DLM_RS_NODES) && (names || lookup || lock)) @@ -598,59 +632,60 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid) goto ignore; switch (rc->rc_type) { - case DLM_RCOM_STATUS: - receive_rcom_status(ls, rc); + case cpu_to_le32(DLM_RCOM_STATUS): + receive_rcom_status(ls, rc, seq); break; - case DLM_RCOM_NAMES: - 
receive_rcom_names(ls, rc); + case cpu_to_le32(DLM_RCOM_NAMES): + receive_rcom_names(ls, rc, seq); break; - case DLM_RCOM_LOOKUP: - receive_rcom_lookup(ls, rc); + case cpu_to_le32(DLM_RCOM_LOOKUP): + receive_rcom_lookup(ls, rc, seq); break; - case DLM_RCOM_LOCK: - if (rc->rc_header.h_length < lock_size) + case cpu_to_le32(DLM_RCOM_LOCK): + if (le16_to_cpu(rc->rc_header.h_length) < lock_size) goto Eshort; - receive_rcom_lock(ls, rc); + receive_rcom_lock(ls, rc, seq); break; - case DLM_RCOM_STATUS_REPLY: + case cpu_to_le32(DLM_RCOM_STATUS_REPLY): receive_sync_reply(ls, rc); break; - case DLM_RCOM_NAMES_REPLY: + case cpu_to_le32(DLM_RCOM_NAMES_REPLY): receive_sync_reply(ls, rc); break; - case DLM_RCOM_LOOKUP_REPLY: + case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY): receive_rcom_lookup_reply(ls, rc); break; - case DLM_RCOM_LOCK_REPLY: - if (rc->rc_header.h_length < lock_size) + case cpu_to_le32(DLM_RCOM_LOCK_REPLY): + if (le16_to_cpu(rc->rc_header.h_length) < lock_size) goto Eshort; - dlm_recover_process_copy(ls, rc); + dlm_recover_process_copy(ls, rc, seq); break; default: - log_error(ls, "receive_rcom bad type %d", rc->rc_type); + log_error(ls, "receive_rcom bad type %d", + le32_to_cpu(rc->rc_type)); } return; ignore: log_limit(ls, "dlm_receive_rcom ignore msg %d " "from %d %llu %llu recover seq %llu sts %x gen %u", - rc->rc_type, + le32_to_cpu(rc->rc_type), nodeid, - (unsigned long long)rc->rc_seq, - (unsigned long long)rc->rc_seq_reply, + (unsigned long long)le64_to_cpu(rc->rc_seq), + (unsigned long long)le64_to_cpu(rc->rc_seq_reply), (unsigned long long)seq, status, ls->ls_generation); return; Eshort: log_error(ls, "recovery message %d from %d is too short", - rc->rc_type, nodeid); + le32_to_cpu(rc->rc_type), nodeid); } diff --git a/fs/dlm/rcom.h b/fs/dlm/rcom.h index f8e243463c15..765926ae0020 100644 --- a/fs/dlm/rcom.h +++ b/fs/dlm/rcom.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
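
The case cpu_to_le32(DLM_RCOM_*) labels above look unusual but are deliberate: rc_type is kept in on-wire (little-endian) byte order, and converting the constant rather than the field costs nothing, since cpu_to_le32() applied to a compile-time constant folds away. A minimal illustration of the idiom:

/* both sides of the comparison are __le32; no per-message byte swap */
static bool demo_is_status(const struct dlm_rcom *rc)
{
        return rc->rc_type == cpu_to_le32(DLM_RCOM_STATUS);
}
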
** ******************************************************************************* ******************************************************************************/ @@ -14,13 +12,15 @@ #ifndef __RCOM_DOT_H__ #define __RCOM_DOT_H__ -int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags); -int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name,int last_len); -int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid); -int dlm_send_rcom_lookup_dump(struct dlm_rsb *r, int to_nodeid); -int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb); -void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid); -int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in); +int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags, + uint64_t seq); +int dlm_rcom_names(struct dlm_ls *ls, int nodeid, char *last_name, + int last_len, uint64_t seq); +int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid, uint64_t seq); +int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb, uint64_t seq); +void dlm_receive_rcom(struct dlm_ls *ls, const struct dlm_rcom *rc, + int nodeid); +int dlm_send_ls_not_ready(int nodeid, const struct dlm_rcom *rc_in); #endif diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c index a6bc63f6e31b..3ac020fb8139 100644 --- a/fs/dlm/recover.c +++ b/fs/dlm/recover.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. 
** ******************************************************************************* ******************************************************************************/ @@ -52,6 +50,10 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) dlm_config.ci_recover_timer * HZ); if (rv) break; + if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) { + log_debug(ls, "dlm_wait_function timed out"); + return -ETIMEDOUT; + } } if (dlm_recovery_stopped(ls)) { @@ -72,9 +74,9 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)) uint32_t dlm_recover_status(struct dlm_ls *ls) { uint32_t status; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); status = ls->ls_recover_status; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); return status; } @@ -85,13 +87,13 @@ static void _set_recover_status(struct dlm_ls *ls, uint32_t status) void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status) { - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); _set_recover_status(ls, status); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); } static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, - int save_slots) + int save_slots, uint64_t seq) { struct dlm_rcom *rc = ls->ls_recover_buf; struct dlm_member *memb; @@ -105,14 +107,14 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, goto out; } - error = dlm_rcom_status(ls, memb->nodeid, 0); + error = dlm_rcom_status(ls, memb->nodeid, 0, seq); if (error) goto out; if (save_slots) dlm_slot_save(ls, rc, memb); - if (rc->rc_result & wait_status) + if (le32_to_cpu(rc->rc_result) & wait_status) break; if (delay < 1000) delay += 20; @@ -124,7 +126,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status, } static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, - uint32_t status_flags) + uint32_t status_flags, uint64_t seq) { struct dlm_rcom *rc = ls->ls_recover_buf; int error = 0, delay = 0, nodeid = ls->ls_low_nodeid; @@ -135,11 +137,11 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, goto out; } - error = dlm_rcom_status(ls, nodeid, status_flags); + error = dlm_rcom_status(ls, nodeid, status_flags, seq); if (error) break; - if (rc->rc_result & wait_status) + if (le32_to_cpu(rc->rc_result) & wait_status) break; if (delay < 1000) delay += 20; @@ -149,22 +151,22 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status, return error; } -static int wait_status(struct dlm_ls *ls, uint32_t status) +static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq) { uint32_t status_all = status << 1; int error; if (ls->ls_low_nodeid == dlm_our_nodeid()) { - error = wait_status_all(ls, status, 0); + error = wait_status_all(ls, status, 0, seq); if (!error) dlm_set_recover_status(ls, status_all); } else - error = wait_status_low(ls, status_all, 0); + error = wait_status_low(ls, status_all, 0, seq); return error; } -int dlm_recover_members_wait(struct dlm_ls *ls) +int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq) { struct dlm_member *memb; struct dlm_slot *slots; @@ -178,7 +180,7 @@ int dlm_recover_members_wait(struct dlm_ls *ls) } if (ls->ls_low_nodeid == dlm_our_nodeid()) { - error = wait_status_all(ls, DLM_RS_NODES, 1); + error = wait_status_all(ls, DLM_RS_NODES, 1, seq); if (error) goto out; @@ -186,18 +188,19 @@ int dlm_recover_members_wait(struct dlm_ls *ls) rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen); if (!rv) { - 
spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); _set_recover_status(ls, DLM_RS_NODES_ALL); ls->ls_num_slots = num_slots; ls->ls_slots_size = slots_size; ls->ls_slots = slots; ls->ls_generation = gen; - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); } else { dlm_set_recover_status(ls, DLM_RS_NODES_ALL); } } else { - error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS); + error = wait_status_low(ls, DLM_RS_NODES_ALL, + DLM_RSF_NEED_SLOTS, seq); if (error) goto out; @@ -207,19 +210,19 @@ int dlm_recover_members_wait(struct dlm_ls *ls) return error; } -int dlm_recover_directory_wait(struct dlm_ls *ls) +int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq) { - return wait_status(ls, DLM_RS_DIR); + return wait_status(ls, DLM_RS_DIR, seq); } -int dlm_recover_locks_wait(struct dlm_ls *ls) +int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq) { - return wait_status(ls, DLM_RS_LOCKS); + return wait_status(ls, DLM_RS_LOCKS, seq); } -int dlm_recover_done_wait(struct dlm_ls *ls) +int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq) { - return wait_status(ls, DLM_RS_DONE); + return wait_status(ls, DLM_RS_DONE, seq); } /* @@ -238,9 +241,9 @@ static int recover_list_empty(struct dlm_ls *ls) { int empty; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); empty = list_empty(&ls->ls_recover_list); - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); return empty; } @@ -249,23 +252,23 @@ static void recover_list_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); if (list_empty(&r->res_recover_list)) { list_add_tail(&r->res_recover_list, &ls->ls_recover_list); ls->ls_recover_list_count++; dlm_hold_rsb(r); } - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); } static void recover_list_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); list_del_init(&r->res_recover_list); ls->ls_recover_list_count--; - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); dlm_put_rsb(r); } @@ -274,7 +277,7 @@ static void recover_list_clear(struct dlm_ls *ls) { struct dlm_rsb *r, *s; - spin_lock(&ls->ls_recover_list_lock); + spin_lock_bh(&ls->ls_recover_list_lock); list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) { list_del_init(&r->res_recover_list); r->res_recover_locks_count = 0; @@ -287,78 +290,81 @@ static void recover_list_clear(struct dlm_ls *ls) ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } - spin_unlock(&ls->ls_recover_list_lock); + spin_unlock_bh(&ls->ls_recover_list_lock); } -static int recover_idr_empty(struct dlm_ls *ls) +static int recover_xa_empty(struct dlm_ls *ls) { int empty = 1; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_xa_lock); if (ls->ls_recover_list_count) empty = 0; - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_xa_lock); return empty; } -static int recover_idr_add(struct dlm_rsb *r) +static int recover_xa_add(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; + struct xa_limit limit = { + .min = 1, + .max = UINT_MAX, + }; + uint32_t id; int rv; - idr_preload(GFP_NOFS); - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_xa_lock); if (r->res_id) { rv = -1; goto out_unlock; } - rv = 
idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT); + rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC); if (rv < 0) goto out_unlock; - r->res_id = rv; + r->res_id = id; ls->ls_recover_list_count++; dlm_hold_rsb(r); rv = 0; out_unlock: - spin_unlock(&ls->ls_recover_idr_lock); - idr_preload_end(); + spin_unlock_bh(&ls->ls_recover_xa_lock); return rv; } -static void recover_idr_del(struct dlm_rsb *r) +static void recover_xa_del(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; - spin_lock(&ls->ls_recover_idr_lock); - idr_remove(&ls->ls_recover_idr, r->res_id); + spin_lock_bh(&ls->ls_recover_xa_lock); + xa_erase_bh(&ls->ls_recover_xa, r->res_id); r->res_id = 0; ls->ls_recover_list_count--; - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_xa_lock); dlm_put_rsb(r); } -static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id) +static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id) { struct dlm_rsb *r; - spin_lock(&ls->ls_recover_idr_lock); - r = idr_find(&ls->ls_recover_idr, (int)id); - spin_unlock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_xa_lock); + r = xa_load(&ls->ls_recover_xa, (int)id); + spin_unlock_bh(&ls->ls_recover_xa_lock); return r; } -static void recover_idr_clear(struct dlm_ls *ls) +static void recover_xa_clear(struct dlm_ls *ls) { struct dlm_rsb *r; - int id; + unsigned long id; - spin_lock(&ls->ls_recover_idr_lock); + spin_lock_bh(&ls->ls_recover_xa_lock); - idr_for_each_entry(&ls->ls_recover_idr, r, id) { - idr_remove(&ls->ls_recover_idr, id); + xa_for_each(&ls->ls_recover_xa, id, r) { + xa_erase_bh(&ls->ls_recover_xa, id); r->res_id = 0; r->res_recover_locks_count = 0; ls->ls_recover_list_count--; @@ -371,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls) ls->ls_recover_list_count); ls->ls_recover_list_count = 0; } - spin_unlock(&ls->ls_recover_idr_lock); + spin_unlock_bh(&ls->ls_recover_xa_lock); } @@ -401,7 +407,7 @@ static void set_lock_master(struct list_head *queue, int nodeid) struct dlm_lkb *lkb; list_for_each_entry(lkb, queue, lkb_statequeue) { - if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) { + if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) { lkb->lkb_nodeid = nodeid; lkb->lkb_remid = 0; } @@ -439,17 +445,18 @@ static void set_new_master(struct dlm_rsb *r) * equals our_nodeid below). */ -static int recover_master(struct dlm_rsb *r, unsigned int *count) +static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq) { struct dlm_ls *ls = r->res_ls; int our_nodeid, dir_nodeid; int is_removed = 0; int error; - if (is_master(r)) + if (r->res_nodeid != -1 && is_master(r)) return 0; - is_removed = dlm_is_removed(ls, r->res_nodeid); + if (r->res_nodeid != -1) + is_removed = dlm_is_removed(ls, r->res_nodeid); if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER)) return 0; @@ -469,8 +476,8 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count) set_new_master(r); error = 0; } else { - recover_idr_add(r); - error = dlm_send_rcom_lookup(r, dir_nodeid); + recover_xa_add(r); + error = dlm_send_rcom_lookup(r, dir_nodeid, seq); } (*count)++; @@ -518,7 +525,8 @@ static int recover_master_static(struct dlm_rsb *r, unsigned int *count) * the correct dir node. 
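
The recover_idr_* to recover_xa_* conversion above follows the standard idr-to-xarray migration: xa_alloc() returns the new id through an output parameter, the xa_limit replaces idr_alloc()'s min/max arguments, and GFP_ATOMIC replaces the idr_preload() dance since the allocation happens under a bh-disabling spinlock. A condensed sketch (demo_track() is hypothetical and assumes an xarray initialized with XA_FLAGS_ALLOC):

static int demo_track(struct xarray *xa, spinlock_t *lock, struct dlm_rsb *r)
{
        struct xa_limit limit = { .min = 1, .max = UINT_MAX };
        uint32_t id;
        int rv;

        spin_lock_bh(lock);
        rv = xa_alloc(xa, &id, r, limit, GFP_ATOMIC);
        if (!rv)
                r->res_id = id; /* id 0 is reserved for "untracked" */
        spin_unlock_bh(lock);
        return rv;
}
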
*/ -int dlm_recover_masters(struct dlm_ls *ls) +int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list) { struct dlm_rsb *r; unsigned int total = 0; @@ -526,12 +534,10 @@ int dlm_recover_masters(struct dlm_ls *ls) int nodir = dlm_no_directory(ls); int error; - log_debug(ls, "dlm_recover_masters"); + log_rinfo(ls, "dlm_recover_masters"); - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { if (dlm_recovery_stopped(ls)) { - up_read(&ls->ls_root_sem); error = -EINTR; goto out; } @@ -540,40 +546,37 @@ int dlm_recover_masters(struct dlm_ls *ls) if (nodir) error = recover_master_static(r, &count); else - error = recover_master(r, &count); + error = recover_master(r, &count, seq); unlock_rsb(r); cond_resched(); total++; - if (error) { - up_read(&ls->ls_root_sem); + if (error) goto out; - } } - up_read(&ls->ls_root_sem); - log_debug(ls, "dlm_recover_masters %u of %u", count, total); + log_rinfo(ls, "dlm_recover_masters %u of %u", count, total); - error = dlm_wait_function(ls, &recover_idr_empty); + error = dlm_wait_function(ls, &recover_xa_empty); out: if (error) - recover_idr_clear(ls); + recover_xa_clear(ls); return error; } -int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) +int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc) { struct dlm_rsb *r; int ret_nodeid, new_master; - r = recover_idr_find(ls, rc->rc_id); + r = recover_xa_find(ls, le64_to_cpu(rc->rc_id)); if (!r) { log_error(ls, "dlm_recover_master_reply no id %llx", - (unsigned long long)rc->rc_id); + (unsigned long long)le64_to_cpu(rc->rc_id)); goto out; } - ret_nodeid = rc->rc_result; + ret_nodeid = le32_to_cpu(rc->rc_result); if (ret_nodeid == dlm_our_nodeid()) new_master = 0; @@ -585,9 +588,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) r->res_nodeid = new_master; set_new_master(r); unlock_rsb(r); - recover_idr_del(r); + recover_xa_del(r); - if (recover_idr_empty(ls)) + if (recover_xa_empty(ls)) wake_up(&ls->ls_wait_general); out: return 0; @@ -612,13 +615,14 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc) * an equal number of replies then recovery for the rsb is done */ -static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head) +static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head, + uint64_t seq) { struct dlm_lkb *lkb; int error = 0; list_for_each_entry(lkb, head, lkb_statequeue) { - error = dlm_send_rcom_lock(r, lkb); + error = dlm_send_rcom_lock(r, lkb, seq); if (error) break; r->res_recover_locks_count++; @@ -627,7 +631,7 @@ static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head) return error; } -static int recover_locks(struct dlm_rsb *r) +static int recover_locks(struct dlm_rsb *r, uint64_t seq) { int error = 0; @@ -635,13 +639,13 @@ static int recover_locks(struct dlm_rsb *r) DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r);); - error = recover_locks_queue(r, &r->res_grantqueue); + error = recover_locks_queue(r, &r->res_grantqueue, seq); if (error) goto out; - error = recover_locks_queue(r, &r->res_convertqueue); + error = recover_locks_queue(r, &r->res_convertqueue, seq); if (error) goto out; - error = recover_locks_queue(r, &r->res_waitqueue); + error = recover_locks_queue(r, &r->res_waitqueue, seq); if (error) goto out; @@ -654,14 +658,14 @@ static int recover_locks(struct dlm_rsb *r) return error; } -int dlm_recover_locks(struct dlm_ls *ls) 
+int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list) { struct dlm_rsb *r; int error, count = 0; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { - if (is_master(r)) { + list_for_each_entry(r, root_list, res_root_list) { + if (r->res_nodeid != -1 && is_master(r)) { rsb_clear_flag(r, RSB_NEW_MASTER); continue; } @@ -671,21 +675,17 @@ int dlm_recover_locks(struct dlm_ls *ls) if (dlm_recovery_stopped(ls)) { error = -EINTR; - up_read(&ls->ls_root_sem); goto out; } - error = recover_locks(r); - if (error) { - up_read(&ls->ls_root_sem); + error = recover_locks(r, seq); + if (error) goto out; - } count += r->res_recover_locks_count; } - up_read(&ls->ls_root_sem); - log_debug(ls, "dlm_recover_locks %d out", count); + log_rinfo(ls, "dlm_recover_locks %d out", count); error = dlm_wait_function(ls, &recover_list_empty); out: @@ -730,10 +730,9 @@ void dlm_recovered_lock(struct dlm_rsb *r) static void recover_lvb(struct dlm_rsb *r) { - struct dlm_lkb *lkb, *high_lkb = NULL; + struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL; uint32_t high_seq = 0; int lock_lvb_exists = 0; - int big_lock_exists = 0; int lvblen = r->res_ls->ls_lvblen; if (!rsb_flag(r, RSB_NEW_MASTER2) && @@ -749,37 +748,37 @@ static void recover_lvb(struct dlm_rsb *r) /* we are the new master, so figure out if VALNOTVALID should be set, and set the rsb lvb from the best lkb available. */ - list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } - list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - if (!(lkb->lkb_exflags & DLM_LKF_VALBLK)) + list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) { + if (!(iter->lkb_exflags & DLM_LKF_VALBLK)) continue; lock_lvb_exists = 1; - if (lkb->lkb_grmode > DLM_LOCK_CR) { - big_lock_exists = 1; + if (iter->lkb_grmode > DLM_LOCK_CR) { + big_lkb = iter; goto setflag; } - if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) { - high_lkb = lkb; - high_seq = lkb->lkb_lvbseq; + if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) { + high_lkb = iter; + high_seq = iter->lkb_lvbseq; } } @@ -788,7 +787,7 @@ static void recover_lvb(struct dlm_rsb *r) goto out; /* lvb is invalidated if only NL/CR locks remain */ - if (!big_lock_exists) + if (!big_lkb) rsb_set_flag(r, RSB_VALNOTVALID); if (!r->res_lvbptr) { @@ -797,9 +796,9 @@ static void recover_lvb(struct dlm_rsb *r) goto out; } - if (big_lock_exists) { - r->res_lvbseq = lkb->lkb_lvbseq; - memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen); + if (big_lkb) { + r->res_lvbseq = big_lkb->lkb_lvbseq; + memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen); } else if (high_lkb) { r->res_lvbseq = high_lkb->lkb_lvbseq; memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen); @@ -812,33 +811,42 @@ static void recover_lvb(struct dlm_rsb *r) } /* All master rsb's flagged RECOVER_CONVERT need to be looked at. The locks - converting PR->CW or CW->PR need to have their lkb_grmode set. 
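
recover_lvb() above picks the lkb whose lvbseq is newest using ((int)iter->lkb_lvbseq - (int)high_seq) >= 0 rather than a plain >= comparison. That is the usual wraparound-safe sequence test, the same trick as the kernel's time_after(): subtracting in signed arithmetic keeps the ordering correct even after the 32-bit sequence counter wraps. Isolated for clarity:

/* true if a is at or after b, tolerating u32 wraparound */
static bool demo_seq_ge(uint32_t a, uint32_t b)
{
        return (int)a - (int)b >= 0;
}
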
*/ + * converting PR->CW or CW->PR may need to have their lkb_grmode changed. + */ static void recover_conversion(struct dlm_rsb *r) { struct dlm_ls *ls = r->res_ls; + uint32_t other_lkid = 0; + int other_grmode = -1; struct dlm_lkb *lkb; - int grmode = -1; list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) { if (lkb->lkb_grmode == DLM_LOCK_PR || lkb->lkb_grmode == DLM_LOCK_CW) { - grmode = lkb->lkb_grmode; + other_grmode = lkb->lkb_grmode; + other_lkid = lkb->lkb_id; break; } } + if (other_grmode == -1) + return; + list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) { - if (lkb->lkb_grmode != DLM_LOCK_IV) - continue; - if (grmode == -1) { - log_debug(ls, "recover_conversion %x set gr to rq %d", - lkb->lkb_id, lkb->lkb_rqmode); - lkb->lkb_grmode = lkb->lkb_rqmode; - } else { - log_debug(ls, "recover_conversion %x set gr %d", - lkb->lkb_id, grmode); - lkb->lkb_grmode = grmode; + /* Lock recovery created incompatible granted modes, so + * change the granted mode of the converting lock to + * NL. The rqmode of the converting lock should be CW, + * which means the converting lock should be granted at + * the end of recovery. + */ + if (((lkb->lkb_grmode == DLM_LOCK_PR) && (other_grmode == DLM_LOCK_CW)) || + ((lkb->lkb_grmode == DLM_LOCK_CW) && (other_grmode == DLM_LOCK_PR))) { + log_rinfo(ls, "%s %x gr %d rq %d, remote %d %x, other_lkid %u, other gr %d, set gr=NL", + __func__, lkb->lkb_id, lkb->lkb_grmode, + lkb->lkb_rqmode, lkb->lkb_nodeid, + lkb->lkb_remid, other_lkid, other_grmode); + lkb->lkb_grmode = DLM_LOCK_NL; } } } @@ -853,15 +861,14 @@ static void recover_grant(struct dlm_rsb *r) rsb_set_flag(r, RSB_RECOVER_GRANT); } -void dlm_recover_rsbs(struct dlm_ls *ls) +void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list) { struct dlm_rsb *r; unsigned int count = 0; - down_read(&ls->ls_root_sem); - list_for_each_entry(r, &ls->ls_root_list, res_root_list) { + list_for_each_entry(r, root_list, res_root_list) { lock_rsb(r); - if (is_master(r)) { + if (r->res_nodeid != -1 && is_master(r)) { if (rsb_flag(r, RSB_RECOVER_CONVERT)) recover_conversion(r); @@ -880,76 +887,31 @@ void dlm_recover_rsbs(struct dlm_ls *ls) rsb_clear_flag(r, RSB_NEW_MASTER2); unlock_rsb(r); } - up_read(&ls->ls_root_sem); if (count) - log_debug(ls, "dlm_recover_rsbs %d done", count); -} - -/* Create a single list of all root rsb's to be used during recovery */ - -int dlm_create_root_list(struct dlm_ls *ls) -{ - struct rb_node *n; - struct dlm_rsb *r; - int i, error = 0; - - down_write(&ls->ls_root_sem); - if (!list_empty(&ls->ls_root_list)) { - log_error(ls, "root list not empty"); - error = -EINVAL; - goto out; - } - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); - for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) { - r = rb_entry(n, struct dlm_rsb, res_hashnode); - list_add(&r->res_root_list, &ls->ls_root_list); - dlm_hold_rsb(r); - } - - if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss)) - log_error(ls, "dlm_create_root_list toss not empty"); - spin_unlock(&ls->ls_rsbtbl[i].lock); - } - out: - up_write(&ls->ls_root_sem); - return error; + log_rinfo(ls, "dlm_recover_rsbs %d done", count); } -void dlm_release_root_list(struct dlm_ls *ls) +void dlm_clear_inactive(struct dlm_ls *ls) { struct dlm_rsb *r, *safe; + unsigned int count = 0; - down_write(&ls->ls_root_sem); - list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) { - list_del_init(&r->res_root_list); - dlm_put_rsb(r); - } - up_write(&ls->ls_root_sem); -} + 
write_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry_safe(r, safe, &ls->ls_slow_inactive, res_slow_list) { + list_del(&r->res_slow_list); + rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node, + dlm_rhash_rsb_params); -void dlm_clear_toss(struct dlm_ls *ls) -{ - struct rb_node *n, *next; - struct dlm_rsb *r; - unsigned int count = 0; - int i; - - for (i = 0; i < ls->ls_rsbtbl_size; i++) { - spin_lock(&ls->ls_rsbtbl[i].lock); - for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) { - next = rb_next(n); - r = rb_entry(n, struct dlm_rsb, res_hashnode); - rb_erase(n, &ls->ls_rsbtbl[i].toss); - dlm_free_rsb(r); - count++; - } - spin_unlock(&ls->ls_rsbtbl[i].lock); + if (!list_empty(&r->res_scan_list)) + list_del_init(&r->res_scan_list); + + free_inactive_rsb(r); + count++; } + write_unlock_bh(&ls->ls_rsbtbl_lock); if (count) - log_debug(ls, "dlm_clear_toss %u done", count); + log_rinfo(ls, "dlm_clear_inactive %u done", count); } diff --git a/fs/dlm/recover.h b/fs/dlm/recover.h index d8c8738c70eb..ec69896462fb 100644 --- a/fs/dlm/recover.h +++ b/fs/dlm/recover.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -17,18 +15,18 @@ int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls)); uint32_t dlm_recover_status(struct dlm_ls *ls); void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status); -int dlm_recover_members_wait(struct dlm_ls *ls); -int dlm_recover_directory_wait(struct dlm_ls *ls); -int dlm_recover_locks_wait(struct dlm_ls *ls); -int dlm_recover_done_wait(struct dlm_ls *ls); -int dlm_recover_masters(struct dlm_ls *ls); -int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc); -int dlm_recover_locks(struct dlm_ls *ls); +int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq); +int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list); +int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc); +int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq, + const struct list_head *root_list); void dlm_recovered_lock(struct dlm_rsb *r); -int dlm_create_root_list(struct dlm_ls *ls); -void dlm_release_root_list(struct dlm_ls *ls); -void dlm_clear_toss(struct dlm_ls *ls); -void dlm_recover_rsbs(struct dlm_ls *ls); +void dlm_clear_inactive(struct dlm_ls *ls); +void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list); #endif /* __RECOVER_DOT_H__ */ diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c index 32f9f8926ec3..12272a8f6d75 100644 --- a/fs/dlm/recoverd.c +++ b/fs/dlm/recoverd.c @@ -1,12 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** 
******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -22,6 +20,67 @@ #include "requestqueue.h" #include "recoverd.h" +static int dlm_create_masters_list(struct dlm_ls *ls) +{ + struct dlm_rsb *r; + int error = 0; + + write_lock_bh(&ls->ls_masters_lock); + if (!list_empty(&ls->ls_masters_list)) { + log_error(ls, "masters list not empty"); + error = -EINVAL; + goto out; + } + + read_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { + if (r->res_nodeid) + continue; + + list_add(&r->res_masters_list, &ls->ls_masters_list); + dlm_hold_rsb(r); + } + read_unlock_bh(&ls->ls_rsbtbl_lock); + out: + write_unlock_bh(&ls->ls_masters_lock); + return error; +} + +static void dlm_release_masters_list(struct dlm_ls *ls) +{ + struct dlm_rsb *r, *safe; + + write_lock_bh(&ls->ls_masters_lock); + list_for_each_entry_safe(r, safe, &ls->ls_masters_list, res_masters_list) { + list_del_init(&r->res_masters_list); + dlm_put_rsb(r); + } + write_unlock_bh(&ls->ls_masters_lock); +} + +static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list) +{ + struct dlm_rsb *r; + + read_lock_bh(&ls->ls_rsbtbl_lock); + list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) { + list_add(&r->res_root_list, root_list); + dlm_hold_rsb(r); + } + + WARN_ON_ONCE(!list_empty(&ls->ls_slow_inactive)); + read_unlock_bh(&ls->ls_rsbtbl_lock); +} + +static void dlm_release_root_list(struct list_head *root_list) +{ + struct dlm_rsb *r, *safe; + + list_for_each_entry_safe(r, safe, root_list, res_root_list) { + list_del_init(&r->res_root_list); + dlm_put_rsb(r); + } +} /* If the start for which we're re-enabling locking (seq) has been superseded by a newer stop (ls_recover_seq), we need to leave locking disabled. @@ -34,64 +93,95 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq) { int error = -EINTR; - down_write(&ls->ls_recv_active); + write_lock_bh(&ls->ls_recv_active); - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { set_bit(LSFL_RUNNING, &ls->ls_flags); + /* Schedule next timer if recovery put something on inactive. + * + * The rsbs that were queued on the inactive list while + * recovery ran have not been scanned yet, because + * LSFL_RUNNING has only just been set; nothing else has + * resumed either, because ls_in_recovery is still held. + * So we cannot run into the case where resume_scan_timer() + * queues a timer that turns out to be a no-op. 
+ */ + resume_scan_timer(ls); /* unblocks processes waiting to enter the dlm */ up_write(&ls->ls_in_recovery); clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); error = 0; } - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); - up_write(&ls->ls_recv_active); + write_unlock_bh(&ls->ls_recv_active); return error; } static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) { + LIST_HEAD(root_list); unsigned long start; int error, neg = 0; - log_debug(ls, "dlm_recover %llu", (unsigned long long)rv->seq); + log_rinfo(ls, "dlm_recover %llu", (unsigned long long)rv->seq); mutex_lock(&ls->ls_recoverd_active); dlm_callback_suspend(ls); - dlm_clear_toss(ls); + dlm_clear_inactive(ls); /* * This list of root rsb's will be the basis of most of the recovery * routines. */ - dlm_create_root_list(ls); + dlm_create_root_list(ls, &root_list); /* * Add or remove nodes from the lockspace's ls_nodes list. + * + * Because we must report all membership changes to the lsops or + * midcomms layer, ls_recover() must not be aborted until this is + * done. */ error = dlm_recover_members(ls, rv, &neg); if (error) { - log_debug(ls, "dlm_recover_members error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_members error %d", error); + goto fail_root_list; } - dlm_recover_dir_nodeid(ls); + dlm_recover_dir_nodeid(ls, &root_list); + + /* Create a snapshot of all active rsbs where we are the master. + * During the barrier between dlm_recover_members_wait() and + * dlm_recover_directory(), other nodes can dump the directory + * rsbs they need (r->res_dir_nodeid == nodeid) via the rcom + * communication handled by dlm_copy_master_names(). + * + * TODO: keep a per-lockspace list of the rsbs we master, + * maintained during normal locking rather than built during + * recovery, so recovery can use it when necessary. + */ + error = dlm_create_masters_list(ls); + if (error) { + log_rinfo(ls, "dlm_create_masters_list error %d", error); + goto fail_root_list; + } - ls->ls_recover_dir_sent_res = 0; - ls->ls_recover_dir_sent_msg = 0; ls->ls_recover_locks_in = 0; dlm_set_recover_status(ls, DLM_RS_NODES); - error = dlm_recover_members_wait(ls); + error = dlm_recover_members_wait(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_members_wait error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_members_wait error %d", error); + dlm_release_masters_list(ls); + goto fail_root_list; } start = jiffies; @@ -101,22 +191,23 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) * nodes their master rsb names that hash to us. 
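dlm_create_root_list() and dlm_create_masters_list() both take a read lock, grab a reference per rsb, and link it into a private list that outlives the lock. Sketched generically below with a hypothetical kref-counted object; the hold/put naming mirrors dlm_hold_rsb()/dlm_put_rsb() but none of this is DLM code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct res_obj {
	struct kref ref;
	struct list_head live_list;	/* protected by the rwlock */
	struct list_head snap_list;	/* private to the snapshot owner */
};

static void res_free(struct kref *kref)
{
	kfree(container_of(kref, struct res_obj, ref));
}

/* Snapshot the live list: each entry is pinned by a reference, so the
 * snapshot stays valid after the lock is dropped. */
static void snapshot_create(struct list_head *live, rwlock_t *lock,
			    struct list_head *snap)
{
	struct res_obj *r;

	read_lock_bh(lock);
	list_for_each_entry(r, live, live_list) {
		kref_get(&r->ref);
		list_add(&r->snap_list, snap);
	}
	read_unlock_bh(lock);
}

static void snapshot_release(struct list_head *snap)
{
	struct res_obj *r, *safe;

	list_for_each_entry_safe(r, safe, snap, snap_list) {
		list_del_init(&r->snap_list);
		kref_put(&r->ref, res_free);
	}
}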
*/ - error = dlm_recover_directory(ls); + error = dlm_recover_directory(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_directory error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_directory error %d", error); + dlm_release_masters_list(ls); + goto fail_root_list; } dlm_set_recover_status(ls, DLM_RS_DIR); - error = dlm_recover_directory_wait(ls); + error = dlm_recover_directory_wait(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_directory_wait error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_directory_wait error %d", error); + dlm_release_masters_list(ls); + goto fail_root_list; } - log_debug(ls, "dlm_recover_directory %u out %u messages", - ls->ls_recover_dir_sent_res, ls->ls_recover_dir_sent_msg); + dlm_release_masters_list(ls); /* * We may have outstanding operations that are waiting for a reply from @@ -126,47 +217,48 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_recover_waiters_pre(ls); - error = dlm_recovery_stopped(ls); - if (error) - goto fail; + if (dlm_recovery_stopped(ls)) { + error = -EINTR; + goto fail_root_list; + } if (neg || dlm_no_directory(ls)) { /* * Clear lkb's for departed nodes. */ - dlm_recover_purge(ls); + dlm_recover_purge(ls, &root_list); /* * Get new master nodeid's for rsb's that were mastered on * departed nodes. */ - error = dlm_recover_masters(ls); + error = dlm_recover_masters(ls, rv->seq, &root_list); if (error) { - log_debug(ls, "dlm_recover_masters error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_masters error %d", error); + goto fail_root_list; } /* * Send our locks on remastered rsb's to the new masters. */ - error = dlm_recover_locks(ls); + error = dlm_recover_locks(ls, rv->seq, &root_list); if (error) { - log_debug(ls, "dlm_recover_locks error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_locks error %d", error); + goto fail_root_list; } dlm_set_recover_status(ls, DLM_RS_LOCKS); - error = dlm_recover_locks_wait(ls); + error = dlm_recover_locks_wait(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_locks_wait error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_locks_wait error %d", error); + goto fail_root_list; } - log_debug(ls, "dlm_recover_locks %u in", + log_rinfo(ls, "dlm_recover_locks %u in", ls->ls_recover_locks_in); /* @@ -175,7 +267,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) * settings. */ - dlm_recover_rsbs(ls); + dlm_recover_rsbs(ls, &root_list); } else { /* * Other lockspace members may be going through the "neg" steps @@ -184,14 +276,14 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) */ dlm_set_recover_status(ls, DLM_RS_LOCKS); - error = dlm_recover_locks_wait(ls); + error = dlm_recover_locks_wait(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_locks_wait error %d", error); - goto fail; + log_rinfo(ls, "dlm_recover_locks_wait error %d", error); + goto fail_root_list; } } - dlm_release_root_list(ls); + dlm_release_root_list(&root_list); /* * Purge directory-related requests that are saved in requestqueue. 
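The reworked error paths above follow the usual kernel unwind pattern: once the root-list snapshot exists, every failure jumps to a label that releases it before falling through to the common exit. A compressed, hypothetical sketch of that label ordering:

#include <linux/list.h>

/* hypothetical phases standing in for the recovery steps */
static void build_snapshot(struct list_head *snap) { }
static void release_snapshot(struct list_head *snap) { }
static int phase_before_release(void) { return 0; }
static int phase_after_release(void) { return 0; }

static int run_recovery(void)
{
	LIST_HEAD(snapshot);
	int error;

	build_snapshot(&snapshot);

	error = phase_before_release();
	if (error)
		goto fail_snapshot;	/* snapshot still held */

	release_snapshot(&snapshot);

	error = phase_after_release();
	if (error)
		goto fail;		/* snapshot already released */

	return 0;

fail_snapshot:
	release_snapshot(&snapshot);
fail:
	return error;
}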
@@ -203,51 +295,48 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) dlm_set_recover_status(ls, DLM_RS_DONE); - error = dlm_recover_done_wait(ls); + error = dlm_recover_done_wait(ls, rv->seq); if (error) { - log_debug(ls, "dlm_recover_done_wait error %d", error); + log_rinfo(ls, "dlm_recover_done_wait error %d", error); goto fail; } dlm_clear_members_gone(ls); - dlm_adjust_timeouts(ls); - dlm_callback_resume(ls); error = enable_locking(ls, rv->seq); if (error) { - log_debug(ls, "enable_locking error %d", error); + log_rinfo(ls, "enable_locking error %d", error); goto fail; } error = dlm_process_requestqueue(ls); if (error) { - log_debug(ls, "dlm_process_requestqueue error %d", error); + log_rinfo(ls, "dlm_process_requestqueue error %d", error); goto fail; } error = dlm_recover_waiters_post(ls); if (error) { - log_debug(ls, "dlm_recover_waiters_post error %d", error); + log_rinfo(ls, "dlm_recover_waiters_post error %d", error); goto fail; } dlm_recover_grant(ls); - log_debug(ls, "dlm_recover %llu generation %u done: %u ms", + log_rinfo(ls, "dlm_recover %llu generation %u done: %u ms", (unsigned long long)rv->seq, ls->ls_generation, jiffies_to_msecs(jiffies - start)); mutex_unlock(&ls->ls_recoverd_active); - dlm_lsop_recover_done(ls); return 0; + fail_root_list: + dlm_release_root_list(&root_list); fail: - dlm_release_root_list(ls); - log_debug(ls, "dlm_recover %llu error %d", - (unsigned long long)rv->seq, error); mutex_unlock(&ls->ls_recoverd_active); + return error; } @@ -258,16 +347,41 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) static void do_ls_recovery(struct dlm_ls *ls) { struct dlm_recover *rv = NULL; + int error; - spin_lock(&ls->ls_recover_lock); + spin_lock_bh(&ls->ls_recover_lock); rv = ls->ls_recover_args; ls->ls_recover_args = NULL; if (rv && ls->ls_recover_seq == rv->seq) clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags); - spin_unlock(&ls->ls_recover_lock); + spin_unlock_bh(&ls->ls_recover_lock); if (rv) { - ls_recover(ls, rv); + error = ls_recover(ls, rv); + switch (error) { + case 0: + ls->ls_recovery_result = 0; + complete(&ls->ls_recovery_done); + + dlm_lsop_recover_done(ls); + break; + case -EINTR: + /* if recovery was interrupted with -EINTR, wait for + * the next ls_recover() iteration, which will + * hopefully succeed. + */ + log_rinfo(ls, "%s %llu interrupted and should be queued to run again", + __func__, (unsigned long long)rv->seq); + break; + default: + log_rinfo(ls, "%s %llu error %d", __func__, + (unsigned long long)rv->seq, error); + + /* make new_lockspace() aware of the critical error */ + ls->ls_recovery_result = error; + complete(&ls->ls_recovery_done); + break; + } + kfree(rv->nodes); kfree(rv); } @@ -287,11 +401,23 @@ static int dlm_recoverd(void *arg) set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags); wake_up(&ls->ls_recover_lock_wait); - while (!kthread_should_stop()) { + while (1) { + /* + * We check kthread_should_stop() after set_current_state() + * so that a kthread_stop() issued just before the state + * change is still seen. 
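do_ls_recovery() above turns the ls_recover() return code into a signal for the lockspace creator: success and hard errors complete ls_recovery_done, while -EINTR leaves the completion pending for the next recovery run. The same reporting shape, sketched with hypothetical names:

#include <linux/completion.h>
#include <linux/errno.h>

struct recovery_state {
	struct completion done;	/* a new_lockspace()-style waiter blocks here */
	int result;		/* valid once 'done' is completed */
};

static void report_recovery(struct recovery_state *rs, int error)
{
	switch (error) {
	case 0:
		rs->result = 0;
		complete(&rs->done);
		break;
	case -EINTR:
		/* interrupted: another recovery run is queued, so do
		 * not complete; the next iteration reports instead */
		break;
	default:
		/* critical error: let the waiter see it */
		rs->result = error;
		complete(&rs->done);
		break;
	}
}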
+ */ set_current_state(TASK_INTERRUPTIBLE); + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + break; + } if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) && - !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) + !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { + if (kthread_should_stop()) + break; schedule(); + } set_current_state(TASK_RUNNING); if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) { diff --git a/fs/dlm/recoverd.h b/fs/dlm/recoverd.h index 8856079733fa..d1944dc5f9e6 100644 --- a/fs/dlm/recoverd.h +++ b/fs/dlm/recoverd.h @@ -1,12 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c index 1695f1b0dd45..719a5243a069 100644 --- a/fs/dlm/requestqueue.c +++ b/fs/dlm/requestqueue.c @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -16,6 +14,7 @@ #include "dir.h" #include "config.h" #include "requestqueue.h" +#include "util.h" struct rq_entry { struct list_head list; @@ -31,12 +30,14 @@ struct rq_entry { * lockspace is enabled on some while still suspended on others. 
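The dlm_recoverd() loop above uses the canonical race-free kthread sleep: mark the task TASK_INTERRUPTIBLE first, then test the stop/work conditions, so a kthread_stop() or wakeup that lands in between simply makes the later schedule() return immediately. A self-contained sketch of the ordering (flag bit and work body are hypothetical):

#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/sched.h>

#define WORK_PENDING 0	/* hypothetical flag bit */

static void do_pending_work(void) { }

static int worker_thread(void *arg)
{
	unsigned long *flags = arg;

	while (1) {
		/* sleeping state first, condition checks second */
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			set_current_state(TASK_RUNNING);
			break;
		}
		if (!test_bit(WORK_PENDING, flags))
			schedule();	/* really sleeps only if no wakeup raced */
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(WORK_PENDING, flags))
			do_pending_work();
	}
	return 0;
}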
*/ -void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) +void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, + const struct dlm_message *ms) { struct rq_entry *e; - int length = ms->m_header.h_length - sizeof(struct dlm_message); + int length = le16_to_cpu(ms->m_header.h_length) - + sizeof(struct dlm_message); - e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS); + e = kmalloc(sizeof(struct rq_entry) + length, GFP_ATOMIC); if (!e) { log_print("dlm_add_requestqueue: out of memory len %d", length); return; @@ -44,11 +45,10 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms) e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF; e->nodeid = nodeid; - memcpy(&e->request, ms, ms->m_header.h_length); + memcpy(&e->request, ms, sizeof(*ms)); + memcpy(&e->request.m_extra, ms->m_extra, length); - mutex_lock(&ls->ls_requestqueue_mutex); list_add_tail(&e->list, &ls->ls_requestqueue); - mutex_unlock(&ls->ls_requestqueue_mutex); } /* @@ -68,71 +68,49 @@ int dlm_process_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; int error = 0; - mutex_lock(&ls->ls_requestqueue_mutex); - + write_lock_bh(&ls->ls_requestqueue_lock); for (;;) { if (list_empty(&ls->ls_requestqueue)) { - mutex_unlock(&ls->ls_requestqueue_mutex); + clear_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags); error = 0; break; } - e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list); - mutex_unlock(&ls->ls_requestqueue_mutex); + e = list_first_entry(&ls->ls_requestqueue, struct rq_entry, list); ms = &e->request; log_limit(ls, "dlm_process_requestqueue msg %d from %d " "lkid %x remid %x result %d seq %u", - ms->m_type, ms->m_header.h_nodeid, - ms->m_lkid, ms->m_remid, ms->m_result, + le32_to_cpu(ms->m_type), + le32_to_cpu(ms->m_header.h_nodeid), + le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid), + from_dlm_errno(le32_to_cpu(ms->m_result)), e->recover_seq); dlm_receive_message_saved(ls, &e->request, e->recover_seq); - - mutex_lock(&ls->ls_requestqueue_mutex); list_del(&e->list); kfree(e); if (dlm_locking_stopped(ls)) { log_debug(ls, "process_requestqueue abort running"); - mutex_unlock(&ls->ls_requestqueue_mutex); error = -EINTR; break; } + write_unlock_bh(&ls->ls_requestqueue_lock); schedule(); + write_lock_bh(&ls->ls_requestqueue_lock); } + write_unlock_bh(&ls->ls_requestqueue_lock); return error; } -/* - * After recovery is done, locking is resumed and dlm_recoverd takes all the - * saved requests and processes them as they would have been by dlm_recv. At - * the same time, dlm_recv will start receiving new requests from remote nodes. - * We want to delay dlm_recv processing new requests until dlm_recoverd has - * finished processing the old saved requests. We don't check for locking - * stopped here because dlm_ls_stop won't stop locking until it's suspended us - * (dlm_recv). 
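dlm_process_requestqueue() above drains its list with the lock held, but drops it around schedule() between entries so message receivers can append while a long queue is processed. The drain skeleton, with a made-up entry type:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct q_entry {
	struct list_head list;
	int payload;
};

static void handle_entry(struct q_entry *e) { }

static void drain_queue(struct list_head *q, rwlock_t *lock)
{
	struct q_entry *e;

	write_lock_bh(lock);
	for (;;) {
		if (list_empty(q))
			break;

		e = list_first_entry(q, struct q_entry, list);
		handle_entry(e);
		list_del(&e->list);
		kfree(e);

		/* let writers in between entries */
		write_unlock_bh(lock);
		schedule();
		write_lock_bh(lock);
	}
	write_unlock_bh(lock);
}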
- */ - -void dlm_wait_requestqueue(struct dlm_ls *ls) -{ - for (;;) { - mutex_lock(&ls->ls_requestqueue_mutex); - if (list_empty(&ls->ls_requestqueue)) - break; - mutex_unlock(&ls->ls_requestqueue_mutex); - schedule(); - } - mutex_unlock(&ls->ls_requestqueue_mutex); -} - static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) { - uint32_t type = ms->m_type; + __le32 type = ms->m_type; /* the ls is being cleaned up and freed by release_lockspace */ - if (!ls->ls_count) + if (!atomic_read(&ls->ls_count)) return 1; if (dlm_is_removed(ls, nodeid)) @@ -141,9 +119,9 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid) /* directory operations are always purged because the directory is always rebuilt during recovery and the lookups resent */ - if (type == DLM_MSG_REMOVE || - type == DLM_MSG_LOOKUP || - type == DLM_MSG_LOOKUP_REPLY) + if (type == cpu_to_le32(DLM_MSG_REMOVE) || + type == cpu_to_le32(DLM_MSG_LOOKUP) || + type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY)) return 1; if (!dlm_no_directory(ls)) @@ -157,7 +135,7 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) struct dlm_message *ms; struct rq_entry *e, *safe; - mutex_lock(&ls->ls_requestqueue_mutex); + write_lock_bh(&ls->ls_requestqueue_lock); list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) { ms = &e->request; @@ -166,6 +144,6 @@ void dlm_purge_requestqueue(struct dlm_ls *ls) kfree(e); } } - mutex_unlock(&ls->ls_requestqueue_mutex); + write_unlock_bh(&ls->ls_requestqueue_lock); } diff --git a/fs/dlm/requestqueue.h b/fs/dlm/requestqueue.h index 10ce449b77da..42bfe23ceabe 100644 --- a/fs/dlm/requestqueue.h +++ b/fs/dlm/requestqueue.h @@ -1,11 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -13,7 +11,8 @@ #ifndef __REQUESTQUEUE_DOT_H__ #define __REQUESTQUEUE_DOT_H__ -void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms); +void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, + const struct dlm_message *ms); int dlm_process_requestqueue(struct dlm_ls *ls); void dlm_wait_requestqueue(struct dlm_ls *ls); void dlm_purge_requestqueue(struct dlm_ls *ls); diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 911649a47dd5..51daf4acbe31 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c @@ -1,15 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License v.2. 
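purge_request() above keeps m_type in its on-wire __le32 form and compares it against cpu_to_le32() constants, avoiding a byte swap on every check. The same style with a made-up message type:

#include <asm/byteorder.h>
#include <linux/types.h>

#define MSG_LOOKUP 3	/* hypothetical on-wire message type */

struct wire_msg {
	__le32 m_type;	/* stored little-endian on the wire */
};

static bool msg_is_lookup(const struct wire_msg *ms)
{
	/* cpu_to_le32() on a constant folds at compile time, so this
	 * comparison is endian-safe and free at runtime */
	return ms->m_type == cpu_to_le32(MSG_LOOKUP);
}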
*/ #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/wait.h> -#include <linux/module.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/poll.h> @@ -18,6 +14,9 @@ #include <linux/dlm.h> #include <linux/dlm_device.h> #include <linux/slab.h> +#include <linux/sched/signal.h> + +#include <trace/events/dlm.h> #include "dlm_internal.h" #include "lockspace.h" @@ -25,6 +24,8 @@ #include "lvb_table.h" #include "user.h" #include "ast.h" +#include "config.h" +#include "memory.h" static const char name_prefix[] = "dlm"; static const struct file_operations device_fops; @@ -48,7 +49,7 @@ struct dlm_lock_params32 { __u32 bastaddr; __u32 lksb; char lvb[DLM_USER_LVB_LEN]; - char name[0]; + char name[]; }; struct dlm_write_request32 { @@ -110,11 +111,11 @@ static void compat_input(struct dlm_write_request *kb, kb->i.lock.parent = kb32->i.lock.parent; kb->i.lock.xid = kb32->i.lock.xid; kb->i.lock.timeout = kb32->i.lock.timeout; - kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam; - kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr; - kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam; - kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr; - kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb; + kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam; + kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr; + kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam; + kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr; + kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb; memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN); memcpy(kb->i.lock.name, kb32->i.lock.name, namelen); } @@ -123,13 +124,15 @@ static void compat_input(struct dlm_write_request *kb, static void compat_output(struct dlm_lock_result *res, struct dlm_lock_result32 *res32) { + memset(res32, 0, sizeof(*res32)); + res32->version[0] = res->version[0]; res32->version[1] = res->version[1]; res32->version[2] = res->version[2]; - res32->user_astaddr = (__u32)(long)res->user_astaddr; - res32->user_astparam = (__u32)(long)res->user_astparam; - res32->user_lksb = (__u32)(long)res->user_lksb; + res32->user_astaddr = (__u32)(__force long)res->user_astaddr; + res32->user_astparam = (__u32)(__force long)res->user_astparam; + res32->user_lksb = (__u32)(__force long)res->user_lksb; res32->bast_mode = res->bast_mode; res32->lvb_offset = res->lvb_offset; @@ -173,18 +176,20 @@ static int lkb_is_endoflife(int mode, int status) being removed and then remove that lkb from the orphans list and free it */ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, - int status, uint32_t sbflags, uint64_t seq) + int status, uint32_t sbflags) { struct dlm_ls *ls; struct dlm_user_args *ua; struct dlm_user_proc *proc; - int rv; + struct dlm_callback *cb; + int rv, copy_lvb; - if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) + if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) || + test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags)) return; ls = lkb->lkb_resource->res_ls; - mutex_lock(&ls->ls_clear_proc_locks); + spin_lock_bh(&ls->ls_clear_proc_locks); /* If ORPHAN/DEAD flag is set, it means the process is dead so an ast can't be delivered. For ORPHAN's, dlm_clear_proc_locks() freed @@ -192,7 +197,8 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, for cases where a completion ast is received for an operation that began before clear_proc_locks did its cancel/unlock. 
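The memset() added to compat_output() above is an information-leak guard: without it, padding holes in the 32-bit result struct would carry uninitialized kernel stack to userspace. Illustrated with a hypothetical struct that has such a hole:

#include <linux/string.h>
#include <linux/types.h>

struct result32 {
	__u32 status;
	__u16 mode;
	/* 2 bytes of implicit padding live here */
	__u32 flags;
};

static void fill_result32(struct result32 *out, __u32 status, __u16 mode,
			  __u32 flags)
{
	memset(out, 0, sizeof(*out));	/* zero padding before copy-out */
	out->status = status;
	out->mode = mode;
	out->flags = flags;
}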
*/ - if (lkb->lkb_flags & (DLM_IFL_ORPHAN | DLM_IFL_DEAD)) + if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) || + test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags)) goto out; DLM_ASSERT(lkb->lkb_ua, dlm_print_lkb(lkb);); @@ -203,34 +209,40 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, goto out; if ((flags & DLM_CB_CAST) && lkb_is_endoflife(mode, status)) - lkb->lkb_flags |= DLM_IFL_ENDOFLIFE; - - spin_lock(&proc->asts_spin); - - rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq); - if (rv < 0) { - spin_unlock(&proc->asts_spin); - goto out; - } - - if (list_empty(&lkb->lkb_cb_list)) { - kref_get(&lkb->lkb_ref); - list_add_tail(&lkb->lkb_cb_list, &proc->asts); - wake_up_interruptible(&proc->wait); + set_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags); + + spin_lock_bh(&proc->asts_spin); + + if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags, + ©_lvb)) { + rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb); + if (!rv) { + cb->copy_lvb = copy_lvb; + cb->ua = *ua; + cb->lkb_lksb = &cb->ua.lksb; + if (copy_lvb) { + memcpy(cb->lvbptr, ua->lksb.sb_lvbptr, + DLM_USER_LVB_LEN); + cb->lkb_lksb->sb_lvbptr = cb->lvbptr; + } + + list_add_tail(&cb->list, &proc->asts); + wake_up_interruptible(&proc->wait); + } } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); - if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) { + if (test_bit(DLM_IFL_ENDOFLIFE_BIT, &lkb->lkb_iflags)) { /* N.B. spin_lock locks_spin, not asts_spin */ - spin_lock(&proc->locks_spin); + spin_lock_bh(&proc->locks_spin); if (!list_empty(&lkb->lkb_ownqueue)) { list_del_init(&lkb->lkb_ownqueue); dlm_put_lkb(lkb); } - spin_unlock(&proc->locks_spin); + spin_unlock_bh(&proc->locks_spin); } out: - mutex_unlock(&ls->ls_clear_proc_locks); + spin_unlock_bh(&ls->ls_clear_proc_locks); } static int device_user_lock(struct dlm_user_proc *proc, @@ -238,6 +250,7 @@ static int device_user_lock(struct dlm_user_proc *proc, { struct dlm_ls *ls; struct dlm_user_args *ua; + uint32_t lkid; int error = -ENOMEM; ls = dlm_find_lockspace_local(proc->lockspace); @@ -260,16 +273,21 @@ static int device_user_lock(struct dlm_user_proc *proc, ua->bastaddr = params->bastaddr; ua->xid = params->xid; - if (params->flags & DLM_LKF_CONVERT) + if (params->flags & DLM_LKF_CONVERT) { error = dlm_user_convert(ls, ua, - params->mode, params->flags, - params->lkid, params->lvb, - (unsigned long) params->timeout); - else { - error = dlm_user_request(ls, ua, + params->mode, params->flags, + params->lkid, params->lvb); + } else if (params->flags & DLM_LKF_ORPHAN) { + error = dlm_user_adopt_orphan(ls, ua, params->mode, params->flags, params->name, params->namelen, - (unsigned long) params->timeout); + &lkid); + if (!error) + error = lkid; + } else { + error = dlm_user_request(ls, ua, + params->mode, params->flags, + params->name, params->namelen); if (!error) error = ua->lksb.sb_lkid; } @@ -346,6 +364,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name) error = misc_register(&ls->ls_device); if (error) { kfree(ls->ls_device.name); + /* this has to be set to NULL + * to avoid a double-free in dlm_device_deregister + */ + ls->ls_device.name = NULL; } fail: return error; @@ -353,18 +375,15 @@ fail: int dlm_device_deregister(struct dlm_ls *ls) { - int error; - /* The device is not registered. This happens when the lockspace was never used from userspace, or when device_create_lockspace() calls dlm_release_lockspace() after the register fails. 
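dlm_device_register() above nulls ls_device.name after freeing it on the misc_register() failure path, because dlm_device_deregister() treats a non-NULL name as "was registered" and would otherwise free it again. A sketch of that pairing with a hypothetical wrapper struct:

#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_ls {
	struct miscdevice dev;
};

static int my_register(struct my_ls *ls, const char *name)
{
	int error;

	ls->dev.name = kstrdup(name, GFP_KERNEL);
	if (!ls->dev.name)
		return -ENOMEM;
	ls->dev.minor = MISC_DYNAMIC_MINOR;

	error = misc_register(&ls->dev);
	if (error) {
		kfree(ls->dev.name);
		ls->dev.name = NULL;	/* deregister must not free again */
	}
	return error;
}

static void my_deregister(struct my_ls *ls)
{
	if (!ls->dev.name)	/* register never succeeded */
		return;
	misc_deregister(&ls->dev);
	kfree(ls->dev.name);
	ls->dev.name = NULL;
}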
*/ if (!ls->ls_device.name) return 0; - error = misc_deregister(&ls->ls_device); - if (!error) - kfree(ls->ls_device.name); - return error; + misc_deregister(&ls->ls_device); + kfree(ls->ls_device.name); + return 0; } static int device_user_purge(struct dlm_user_proc *proc, @@ -392,9 +411,9 @@ static int device_create_lockspace(struct dlm_lspace_params *params) if (!capable(CAP_SYS_ADMIN)) return -EPERM; - error = dlm_new_lockspace(params->name, NULL, params->flags, - DLM_USER_LVB_LEN, NULL, NULL, NULL, - &lockspace); + error = dlm_new_user_lockspace(params->name, dlm_config.ci_cluster_name, + params->flags, DLM_USER_LVB_LEN, NULL, + NULL, NULL, &lockspace); if (error) return error; @@ -406,7 +425,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params) dlm_put_lockspace(ls); if (error) - dlm_release_lockspace(lockspace, 0); + dlm_release_lockspace(lockspace, DLM_RELEASE_NO_LOCKS); else error = ls->ls_device.minor; @@ -417,7 +436,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params) { dlm_lockspace_t *lockspace; struct dlm_ls *ls; - int error, force = 0; + int error, force = DLM_RELEASE_NO_LOCKS; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -427,9 +446,9 @@ static int device_remove_lockspace(struct dlm_lspace_params *params) return -ENOENT; if (params->flags & DLM_USER_LSFLG_FORCEFREE) - force = 2; + force = DLM_RELEASE_NORMAL; - lockspace = ls->ls_local_handle; + lockspace = ls; dlm_put_lockspace(ls); /* The final dlm_release_lockspace waits for references to go to @@ -493,7 +512,6 @@ static ssize_t device_write(struct file *file, const char __user *buf, { struct dlm_user_proc *proc = file->private_data; struct dlm_write_request *kbuf; - sigset_t tmpsig, allsigs; int error; #ifdef CONFIG_COMPAT @@ -510,14 +528,9 @@ static ssize_t device_write(struct file *file, const char __user *buf, if (count > sizeof(struct dlm_write_request) + DLM_RESNAME_MAXLEN) return -EINVAL; - kbuf = kzalloc(count + 1, GFP_NOFS); - if (!kbuf) - return -ENOMEM; - - if (copy_from_user(kbuf, buf, count)) { - error = -EFAULT; - goto out_free; - } + kbuf = memdup_user_nul(buf, count); + if (IS_ERR(kbuf)) + return PTR_ERR(kbuf); if (check_version(kbuf)) { error = -EBADE; @@ -557,9 +570,6 @@ static ssize_t device_write(struct file *file, const char __user *buf, goto out_free; } - sigfillset(&allsigs); - sigprocmask(SIG_BLOCK, &allsigs, &tmpsig); - error = -EINVAL; switch (kbuf->cmd) @@ -567,7 +577,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, case DLM_USER_LOCK: if (!proc) { log_print("no locking on control device"); - goto out_sig; + goto out_free; } error = device_user_lock(proc, &kbuf->i.lock); break; @@ -575,7 +585,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, case DLM_USER_UNLOCK: if (!proc) { log_print("no locking on control device"); - goto out_sig; + goto out_free; } error = device_user_unlock(proc, &kbuf->i.lock); break; @@ -583,7 +593,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, case DLM_USER_DEADLOCK: if (!proc) { log_print("no locking on control device"); - goto out_sig; + goto out_free; } error = device_user_deadlock(proc, &kbuf->i.lock); break; @@ -591,7 +601,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, case DLM_USER_CREATE_LOCKSPACE: if (proc) { log_print("create/remove only on control device"); - goto out_sig; + goto out_free; } error = device_create_lockspace(&kbuf->i.lspace); break; @@ -599,7 +609,7 @@ static ssize_t device_write(struct file 
*file, const char __user *buf, case DLM_USER_REMOVE_LOCKSPACE: if (proc) { log_print("create/remove only on control device"); - goto out_sig; + goto out_free; } error = device_remove_lockspace(&kbuf->i.lspace); break; @@ -607,7 +617,7 @@ static ssize_t device_write(struct file *file, const char __user *buf, case DLM_USER_PURGE: if (!proc) { log_print("no locking on control device"); - goto out_sig; + goto out_free; } error = device_user_purge(proc, &kbuf->i.purge); break; @@ -617,8 +627,6 @@ static ssize_t device_write(struct file *file, const char __user *buf, kbuf->cmd); } - out_sig: - sigprocmask(SIG_SETMASK, &tmpsig, NULL); out_free: kfree(kbuf); return error; @@ -643,7 +651,7 @@ static int device_open(struct inode *inode, struct file *file) return -ENOMEM; } - proc->lockspace = ls->ls_local_handle; + proc->lockspace = ls; INIT_LIST_HEAD(&proc->asts); INIT_LIST_HEAD(&proc->locks); INIT_LIST_HEAD(&proc->unlocking); @@ -659,15 +667,11 @@ static int device_close(struct inode *inode, struct file *file) { struct dlm_user_proc *proc = file->private_data; struct dlm_ls *ls; - sigset_t tmpsig, allsigs; ls = dlm_find_lockspace_local(proc->lockspace); if (!ls) return -ENOENT; - sigfillset(&allsigs); - sigprocmask(SIG_BLOCK, &allsigs, &tmpsig); - set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags); dlm_clear_proc_locks(ls, proc); @@ -685,9 +689,6 @@ static int device_close(struct inode *inode, struct file *file) /* FIXME: AUTOFREE: if this ls is no longer used do device_remove_lockspace() */ - sigprocmask(SIG_SETMASK, &tmpsig, NULL); - recalc_sigpending(); - return 0; } @@ -708,7 +709,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat, result.version[0] = DLM_DEVICE_VERSION_MAJOR; result.version[1] = DLM_DEVICE_VERSION_MINOR; result.version[2] = DLM_DEVICE_VERSION_PATCH; - memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb)); + memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr)); result.user_lksb = ua->user_lksb; /* FIXME: dlm1 provides for the user's bastparam/addr to not be updated @@ -785,10 +786,9 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dlm_user_proc *proc = file->private_data; - struct dlm_lkb *lkb; DECLARE_WAITQUEUE(wait, current); - struct dlm_callback cb; - int rv, resid, copy_lvb = 0; + struct dlm_callback *cb; + int rv, ret; if (count == sizeof(struct dlm_device_version)) { rv = copy_version_to_user(buf, count); @@ -807,16 +807,14 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, #endif return -EINVAL; - try_another: - /* do we really need this? can a read happen after a close? 
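copy_result_to_user() above now copies the lksb with offsetof(struct dlm_lksb, sb_lvbptr) as the length, so the kernel-side LVB pointer never lands in the user-visible result. The same partial-copy trick on a hypothetical struct:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct status_block {
	int status;
	u32 flags;
	void *kernel_ptr;	/* kernel address; must not reach userspace */
};

static void export_status(struct status_block *out,
			  const struct status_block *in)
{
	memset(out, 0, sizeof(*out));
	/* copy only the members that precede the pointer */
	memcpy(out, in, offsetof(struct status_block, kernel_ptr));
}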
*/ if (test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)) return -EINVAL; - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); if (list_empty(&proc->asts)) { if (file->f_flags & O_NONBLOCK) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return -EAGAIN; } @@ -825,16 +823,16 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, repeat: set_current_state(TASK_INTERRUPTIBLE); if (list_empty(&proc->asts) && !signal_pending(current)) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); schedule(); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); goto repeat; } set_current_state(TASK_RUNNING); remove_wait_queue(&proc->wait, &wait); if (signal_pending(current)) { - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return -ERESTARTSYS; } } @@ -843,67 +841,39 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count, without removing lkb_cb_list; so empty lkb_cb_list is always consistent with empty lkb_callbacks */ - lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list); - - rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid); - if (rv < 0) { - /* this shouldn't happen; lkb should have been removed from - list when resid was zero */ - log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id); - list_del_init(&lkb->lkb_cb_list); - spin_unlock(&proc->asts_spin); - /* removes ref for proc->asts, may cause lkb to be freed */ - dlm_put_lkb(lkb); - goto try_another; + cb = list_first_entry(&proc->asts, struct dlm_callback, list); + list_del(&cb->list); + spin_unlock_bh(&proc->asts_spin); + + if (cb->flags & DLM_CB_BAST) { + trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name, + cb->res_length); + } else if (cb->flags & DLM_CB_CAST) { + cb->lkb_lksb->sb_status = cb->sb_status; + cb->lkb_lksb->sb_flags = cb->sb_flags; + trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status, + cb->sb_flags, cb->res_name, cb->res_length); } - if (!resid) - list_del_init(&lkb->lkb_cb_list); - spin_unlock(&proc->asts_spin); - - if (cb.flags & DLM_CB_SKIP) { - /* removes ref for proc->asts, may cause lkb to be freed */ - if (!resid) - dlm_put_lkb(lkb); - goto try_another; - } - - if (cb.flags & DLM_CB_CAST) { - int old_mode, new_mode; - - old_mode = lkb->lkb_last_cast.mode; - new_mode = cb.mode; - - if (!cb.sb_status && lkb->lkb_lksb->sb_lvbptr && - dlm_lvb_operations[old_mode + 1][new_mode + 1]) - copy_lvb = 1; - - lkb->lkb_lksb->sb_status = cb.sb_status; - lkb->lkb_lksb->sb_flags = cb.sb_flags; - } - - rv = copy_result_to_user(lkb->lkb_ua, - test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), - cb.flags, cb.mode, copy_lvb, buf, count); - - /* removes ref for proc->asts, may cause lkb to be freed */ - if (!resid) - dlm_put_lkb(lkb); - return rv; + ret = copy_result_to_user(&cb->ua, + test_bit(DLM_PROC_FLAGS_COMPAT, &proc->flags), + cb->flags, cb->mode, cb->copy_lvb, buf, count); + dlm_free_cb(cb); + return ret; } -static unsigned int device_poll(struct file *file, poll_table *wait) +static __poll_t device_poll(struct file *file, poll_table *wait) { struct dlm_user_proc *proc = file->private_data; poll_wait(file, &proc->wait, wait); - spin_lock(&proc->asts_spin); + spin_lock_bh(&proc->asts_spin); if (!list_empty(&proc->asts)) { - spin_unlock(&proc->asts_spin); - return POLLIN | POLLRDNORM; + spin_unlock_bh(&proc->asts_spin); + return EPOLLIN | EPOLLRDNORM; } - spin_unlock(&proc->asts_spin); + spin_unlock_bh(&proc->asts_spin); return 0; } diff --git a/fs/dlm/user.h 
b/fs/dlm/user.h index 00499ab8835f..2caf8e6e24d5 100644 --- a/fs/dlm/user.h +++ b/fs/dlm/user.h @@ -1,16 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2006-2010 Red Hat, Inc. All rights reserved. - * - * This copyrighted material is made available to anyone wishing to use, - * modify, copy, or redistribute it subject to the terms and conditions - * of the GNU General Public License v.2. */ #ifndef __USER_DOT_H__ #define __USER_DOT_H__ +void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb); void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, - int status, uint32_t sbflags, uint64_t seq); + int status, uint32_t sbflags); int dlm_user_init(void); void dlm_user_exit(void); int dlm_device_deregister(struct dlm_ls *ls); diff --git a/fs/dlm/util.c b/fs/dlm/util.c index e36520af7cc0..f2bc401f312f 100644 --- a/fs/dlm/util.c +++ b/fs/dlm/util.c @@ -1,11 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -22,26 +20,10 @@ #define DLM_ERRNO_ETIMEDOUT 110 #define DLM_ERRNO_EINPROGRESS 115 -static void header_out(struct dlm_header *hd) -{ - hd->h_version = cpu_to_le32(hd->h_version); - hd->h_lockspace = cpu_to_le32(hd->h_lockspace); - hd->h_nodeid = cpu_to_le32(hd->h_nodeid); - hd->h_length = cpu_to_le16(hd->h_length); -} - -static void header_in(struct dlm_header *hd) -{ - hd->h_version = le32_to_cpu(hd->h_version); - hd->h_lockspace = le32_to_cpu(hd->h_lockspace); - hd->h_nodeid = le32_to_cpu(hd->h_nodeid); - hd->h_length = le16_to_cpu(hd->h_length); -} - /* higher errno values are inconsistent across architectures, so select one set of values for on the wire */ -static int to_dlm_errno(int err) +int to_dlm_errno(int err) { switch (err) { case -EDEADLK: @@ -62,7 +44,7 @@ static int to_dlm_errno(int err) return err; } -static int from_dlm_errno(int err) +int from_dlm_errno(int err) { switch (err) { case -DLM_ERRNO_EDEADLK: @@ -82,73 +64,3 @@ static int from_dlm_errno(int err) } return err; } - -void dlm_message_out(struct dlm_message *ms) -{ - header_out(&ms->m_header); - - ms->m_type = cpu_to_le32(ms->m_type); - ms->m_nodeid = cpu_to_le32(ms->m_nodeid); - ms->m_pid = cpu_to_le32(ms->m_pid); - ms->m_lkid = cpu_to_le32(ms->m_lkid); - ms->m_remid = cpu_to_le32(ms->m_remid); - ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid); - ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid); - ms->m_exflags = cpu_to_le32(ms->m_exflags); - ms->m_sbflags = cpu_to_le32(ms->m_sbflags); - ms->m_flags = cpu_to_le32(ms->m_flags); - ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq); - ms->m_hash = cpu_to_le32(ms->m_hash); - ms->m_status = cpu_to_le32(ms->m_status); - ms->m_grmode = cpu_to_le32(ms->m_grmode); - ms->m_rqmode = cpu_to_le32(ms->m_rqmode); - ms->m_bastmode = cpu_to_le32(ms->m_bastmode); - ms->m_asts = cpu_to_le32(ms->m_asts); - ms->m_result = cpu_to_le32(to_dlm_errno(ms->m_result)); -} - -void dlm_message_in(struct dlm_message *ms) -{ - header_in(&ms->m_header); - - ms->m_type = le32_to_cpu(ms->m_type); - 
ms->m_nodeid = le32_to_cpu(ms->m_nodeid); - ms->m_pid = le32_to_cpu(ms->m_pid); - ms->m_lkid = le32_to_cpu(ms->m_lkid); - ms->m_remid = le32_to_cpu(ms->m_remid); - ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid); - ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid); - ms->m_exflags = le32_to_cpu(ms->m_exflags); - ms->m_sbflags = le32_to_cpu(ms->m_sbflags); - ms->m_flags = le32_to_cpu(ms->m_flags); - ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq); - ms->m_hash = le32_to_cpu(ms->m_hash); - ms->m_status = le32_to_cpu(ms->m_status); - ms->m_grmode = le32_to_cpu(ms->m_grmode); - ms->m_rqmode = le32_to_cpu(ms->m_rqmode); - ms->m_bastmode = le32_to_cpu(ms->m_bastmode); - ms->m_asts = le32_to_cpu(ms->m_asts); - ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result)); -} - -void dlm_rcom_out(struct dlm_rcom *rc) -{ - header_out(&rc->rc_header); - - rc->rc_type = cpu_to_le32(rc->rc_type); - rc->rc_result = cpu_to_le32(rc->rc_result); - rc->rc_id = cpu_to_le64(rc->rc_id); - rc->rc_seq = cpu_to_le64(rc->rc_seq); - rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply); -} - -void dlm_rcom_in(struct dlm_rcom *rc) -{ - header_in(&rc->rc_header); - - rc->rc_type = le32_to_cpu(rc->rc_type); - rc->rc_result = le32_to_cpu(rc->rc_result); - rc->rc_id = le64_to_cpu(rc->rc_id); - rc->rc_seq = le64_to_cpu(rc->rc_seq); - rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply); -} diff --git a/fs/dlm/util.h b/fs/dlm/util.h index 2b25915161c0..b6a4b8adca8d 100644 --- a/fs/dlm/util.h +++ b/fs/dlm/util.h @@ -1,11 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** ******************************************************************************* ** ** Copyright (C) 2005 Red Hat, Inc. All rights reserved. ** -** This copyrighted material is made available to anyone wishing to use, -** modify, copy, or redistribute it subject to the terms and conditions -** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ @@ -13,10 +11,8 @@ #ifndef __UTIL_DOT_H__ #define __UTIL_DOT_H__ -void dlm_message_out(struct dlm_message *ms); -void dlm_message_in(struct dlm_message *ms); -void dlm_rcom_out(struct dlm_rcom *rc); -void dlm_rcom_in(struct dlm_rcom *rc); +int to_dlm_errno(int err); +int from_dlm_errno(int err); #endif
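to_dlm_errno()/from_dlm_errno(), now exported by util.h, exist because higher errno values differ between architectures, so one canonical set is fixed for the wire. A reduced sketch of the mapping (two entries instead of DLM's full table):

#include <linux/errno.h>

#define WIRE_EDEADLK	35	/* canonical on-wire values */
#define WIRE_ETIMEDOUT	110

static int to_wire_errno(int err)
{
	switch (err) {
	case -EDEADLK:
		return -WIRE_EDEADLK;
	case -ETIMEDOUT:
		return -WIRE_ETIMEDOUT;
	default:
		return err;	/* low errnos are portable as-is */
	}
}

static int from_wire_errno(int err)
{
	switch (err) {
	case -WIRE_EDEADLK:
		return -EDEADLK;
	case -WIRE_ETIMEDOUT:
		return -ETIMEDOUT;
	default:
		return err;
	}
}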
