Diffstat (limited to 'drivers/staging/lustre/lustre/ldlm')
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/interval_tree.c  | 100
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/l_lock.c         |   4
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_extent.c    |   4
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_flock.c     | 109
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_internal.h  |  20
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lib.c       |  32
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lock.c      |  84
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c     |  28
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_pool.c      |  49
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_request.c   | 119
 -rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_resource.c  |  53
11 files changed, 381 insertions(+), 221 deletions(-)
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index f4a70ebddeaf..e134ecd21bb2 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -90,6 +90,17 @@ static inline int extent_equal(struct interval_node_extent *e1,
return (e1->start == e2->start) && (e1->end == e2->end);
}
+static inline int extent_overlapped(struct interval_node_extent *e1,
+ struct interval_node_extent *e2)
+{
+ return (e1->start <= e2->end) && (e2->start <= e1->end);
+}
+
+static inline int node_equal(struct interval_node *n1, struct interval_node *n2)
+{
+ return extent_equal(&n1->in_extent, &n2->in_extent);
+}
+
static inline __u64 max_u64(__u64 x, __u64 y)
{
return x > y ? x : y;
@@ -262,7 +273,7 @@ struct interval_node *interval_insert(struct interval_node *node,
p = root;
while (*p) {
parent = *p;
- if (extent_equal(&parent->in_extent, &node->in_extent))
+ if (node_equal(parent, node))
return parent;
/* max_high field must be updated after each iteration */
@@ -463,3 +474,90 @@ color:
interval_erase_color(child, parent, root);
}
EXPORT_SYMBOL(interval_erase);
+
+static inline int interval_may_overlap(struct interval_node *node,
+ struct interval_node_extent *ext)
+{
+ return (ext->start <= node->in_max_high &&
+ ext->end >= interval_low(node));
+}
+
+/*
+ * This function finds all intervals that overlap the interval ext
+ * and calls func to handle the resulting intervals one by one.
+ * In Lustre, this function will find all conflicting locks in
+ * the granted queue and add them to the AST work list.
+ *
+ * The iterative implementation below is equivalent to this
+ * recursive sketch:
+ *
+ * {
+ * if (!node)
+ * return 0;
+ * if (ext->end < interval_low(node)) {
+ * interval_search(node->in_left, ext, func, data);
+ * } else if (interval_may_overlap(node, ext)) {
+ * if (extent_overlapped(ext, &node->in_extent))
+ * func(node, data);
+ * interval_search(node->in_left, ext, func, data);
+ * interval_search(node->in_right, ext, func, data);
+ * }
+ * return 0;
+ * }
+ *
+ */
+enum interval_iter interval_search(struct interval_node *node,
+ struct interval_node_extent *ext,
+ interval_callback_t func,
+ void *data)
+{
+ enum interval_iter rc = INTERVAL_ITER_CONT;
+ struct interval_node *parent;
+
+ LASSERT(ext);
+ LASSERT(func);
+
+ while (node) {
+ if (ext->end < interval_low(node)) {
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ } else if (interval_may_overlap(node, ext)) {
+ if (extent_overlapped(ext, &node->in_extent)) {
+ rc = func(node, data);
+ if (rc == INTERVAL_ITER_STOP)
+ break;
+ }
+
+ if (node->in_left) {
+ node = node->in_left;
+ continue;
+ }
+ if (node->in_right) {
+ node = node->in_right;
+ continue;
+ }
+ }
+
+ parent = node->in_parent;
+ while (parent) {
+ if (node_is_left_child(node) &&
+ parent->in_right) {
+ /*
+ * If we ever went left, it means the parent met
+ * ext->end < interval_low(parent) or
+ * may_overlap(parent). If the former was true,
+ * we needn't go back, so stop early and check
+ * may_overlap(parent) after this loop.
+ */
+ node = parent->in_right;
+ break;
+ }
+ node = parent;
+ parent = parent->in_parent;
+ }
+ if (!parent || !interval_may_overlap(parent, ext))
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(interval_search);
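
As a usage sketch (not part of the patch), a caller of the new iterative
interval_search() might count conflicting extents like this; the callback
returns INTERVAL_ITER_CONT to keep walking or INTERVAL_ITER_STOP to end the
traversal early. The counting callback and wrapper below are illustrative
only; the types and the INTERVAL_ITER_* enum come from interval_tree.h.

	/* Hypothetical helper, not part of the patch. */
	static enum interval_iter count_conflict_cb(struct interval_node *node,
						    void *data)
	{
		int *conflicts = data;

		(*conflicts)++;
		/* Return INTERVAL_ITER_STOP here to abort on the first hit. */
		return INTERVAL_ITER_CONT;
	}

	static int count_conflicts(struct interval_node *root,
				   __u64 start, __u64 end)
	{
		struct interval_node_extent ext = {
			.start = start,
			.end   = end,
		};
		int conflicts = 0;

		interval_search(root, &ext, count_conflict_cb, &conflicts);
		return conflicts;
	}
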
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index ea8840cb9056..3845f386f1db 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -45,6 +45,8 @@
* being an atomic operation.
*/
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
+ __acquires(&lock->l_lock)
+ __acquires(&lock->l_resource->lr_lock)
{
spin_lock(&lock->l_lock);
@@ -59,6 +61,8 @@ EXPORT_SYMBOL(lock_res_and_lock);
* Unlock a lock and its resource previously locked with lock_res_and_lock
*/
void unlock_res_and_lock(struct ldlm_lock *lock)
+ __releases(&lock->l_resource->lr_lock)
+ __releases(&lock->l_lock)
{
/* on server-side resource of lock doesn't change */
ldlm_clear_res_locked(lock);
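
The __acquires()/__releases() markers added above are annotations for the
sparse static checker; in a regular build they expand to nothing. A sketch of
the standard kernel definitions they rely on (as found in
include/linux/compiler.h of this era), shown for reference:

	#ifdef __CHECKER__
	# define __acquires(x)	__attribute__((context(x, 0, 1)))
	# define __releases(x)	__attribute__((context(x, 1, 0)))
	#else
	# define __acquires(x)
	# define __releases(x)
	#endif

With these in place, sparse can verify that every path through
lock_res_and_lock()/unlock_res_and_lock() acquires and releases the two
spinlocks in a balanced way.
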
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index f5023d9b78f5..ecf472e4813d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -221,7 +221,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
}
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy)
+ ldlm_policy_data_t *lpolicy)
{
memset(lpolicy, 0, sizeof(*lpolicy));
lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -230,7 +230,7 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
}
void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy)
+ ldlm_wire_policy_data_t *wpolicy)
{
memset(wpolicy, 0, sizeof(*wpolicy));
wpolicy->l_extent.start = lpolicy->l_extent.start;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index d6b61bc39135..861f36f039b5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -97,7 +97,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
+ if (flags == LDLM_FL_WAIT_NOREPROC) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -166,7 +166,7 @@ reprocess:
*/
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
ownlocks = tmp;
break;
@@ -182,7 +182,7 @@ reprocess:
*/
list_for_each(tmp, &res->lr_granted) {
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
if (ldlm_same_flock_owner(lock, req)) {
if (!ownlocks)
@@ -339,10 +339,10 @@ reprocess:
lock->l_granted_mode, &null_cbs,
NULL, 0, LVB_T_NONE);
lock_res_and_lock(req);
- if (!new2) {
+ if (IS_ERR(new2)) {
ldlm_flock_destroy(req, lock->l_granted_mode,
*flags);
- *err = -ENOLCK;
+ *err = PTR_ERR(new2);
return LDLM_ITER_STOP;
}
goto reprocess;
@@ -455,29 +455,22 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
enum ldlm_error err;
int rc = 0;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT3)) {
+ lock_res_and_lock(lock);
+ lock->l_flags |= LDLM_FL_FAIL_LOC;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT3, 4);
+ }
CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
flags, data, getlk);
- /* Import invalidation. We need to actually release the lock
- * references being held, so that it can go away. No point in
- * holding the lock even if app still believes it has it, since
- * server already dropped it anyway. Only for granted locks too.
- */
- if ((lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) ==
- (LDLM_FL_FAILED|LDLM_FL_LOCAL_ONLY)) {
- if (lock->l_req_mode == lock->l_granted_mode &&
- lock->l_granted_mode != LCK_NL && !data)
- ldlm_lock_decref_internal(lock, lock->l_req_mode);
-
- /* Need to wake up the waiter if we were evicted */
- wake_up(&lock->l_waitq);
- return 0;
- }
-
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (flags & LDLM_FL_FAILED)
+ goto granted;
+
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
if (!data)
/* mds granted the lock in the reply */
goto granted;
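
LDLM_FL_BLOCKED_MASK, used here and again in ldlm_lock.c and ldlm_request.c
below, presumably collapses the three blocking flags that the removed lines
spelled out. A sketch of the assumed definition (in lustre_dlm_flags.h it is
kept as a single numeric constant with this value):

	#define LDLM_FL_BLOCKED_MASK (LDLM_FL_BLOCK_GRANTED | \
				      LDLM_FL_BLOCK_CONV | \
				      LDLM_FL_BLOCK_WAIT)
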
@@ -514,12 +507,21 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (ldlm_is_failed(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
- return -EIO;
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT4)) {
+ lock_res_and_lock(lock);
+ /* DEADLOCK is always set with CBPENDING */
+ lock->l_flags |= LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT4, 4);
+ }
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_LDLM_CP_CB_WAIT5)) {
+ lock_res_and_lock(lock);
+ /* DEADLOCK is always set with CBPENDING */
+ lock->l_flags |= LDLM_FL_FAIL_LOC |
+ LDLM_FL_FLOCK_DEADLOCK | LDLM_FL_CBPENDING;
+ unlock_res_and_lock(lock);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT5, 4);
}
-
- LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res_and_lock(lock);
@@ -530,20 +532,59 @@ granted:
if (ldlm_is_destroyed(lock)) {
unlock_res_and_lock(lock);
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
+ /*
+ * An error still has to be returned so that it propagates
+ * up to the ldlm_cli_enqueue_fini() caller.
+ */
+ return -EIO;
}
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
- list_del_init(&lock->l_res_link);
+ ldlm_resource_unlink_lock(lock);
+
+ /*
+ * Import invalidation. We need to actually release the lock
+ * references being held, so that it can go away. No point in
+ * holding the lock even if app still believes it has it, since
+ * server already dropped it anyway. Only for granted locks too.
+ */
+ /* Do the same for DEADLOCK'ed locks. */
+ if (ldlm_is_failed(lock) || ldlm_is_flock_deadlock(lock)) {
+ int mode;
+
+ if (flags & LDLM_FL_TEST_LOCK)
+ LASSERT(ldlm_is_test_lock(lock));
+
+ if (ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
+ mode = getlk->fl_type;
+ else
+ mode = lock->l_granted_mode;
+
+ if (ldlm_is_flock_deadlock(lock)) {
+ LDLM_DEBUG(lock, "client-side enqueue deadlock received");
+ rc = -EDEADLK;
+ }
+ ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
+ unlock_res_and_lock(lock);
+
+ /* Need to wake up the waiter if we were evicted */
+ wake_up(&lock->l_waitq);
+
+ /*
+ * An error still has to be returned so that it propagates
+ * up to the ldlm_cli_enqueue_fini() caller.
+ */
+ return rc ? : -EIO;
+ }
+
+ LDLM_DEBUG(lock, "client-side enqueue granted");
- if (ldlm_is_flock_deadlock(lock)) {
- LDLM_DEBUG(lock, "client-side enqueue deadlock received");
- rc = -EDEADLK;
- } else if (flags & LDLM_FL_TEST_LOCK) {
+ if (flags & LDLM_FL_TEST_LOCK) {
/* fcntl(F_GETLK) request */
/* The old mode was saved in getlk->fl_type so that if the mode
* in the lock changes we can decref the appropriate refcount.
*/
+ LASSERT(ldlm_is_test_lock(lock));
ldlm_flock_destroy(lock, getlk->fl_type, LDLM_FL_WAIT_NOREPROC);
switch (lock->l_granted_mode) {
case LCK_PR:
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e4cf65d2d3b1..5e82cfc245b2 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -100,9 +100,10 @@ enum {
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
enum ldlm_cancel_flags sync, int flags);
int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
- struct list_head *cancels, int count, int max,
- enum ldlm_cancel_flags cancel_flags, int flags);
-extern int ldlm_enqueue_min;
+ struct list_head *cancels, int count, int max,
+ enum ldlm_cancel_flags cancel_flags, int flags);
+extern unsigned int ldlm_enqueue_min;
+extern unsigned int ldlm_cancel_unused_locks_before_replay;
/* ldlm_resource.c */
int ldlm_resource_putref_locked(struct ldlm_resource *res);
@@ -200,8 +201,7 @@ ldlm_interval_extent(struct ldlm_interval *node)
LASSERT(!list_empty(&node->li_group));
- lock = list_entry(node->li_group.next, struct ldlm_lock,
- l_sl_policy);
+ lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
return &lock->l_policy_data.l_extent;
}
@@ -302,7 +302,7 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
lock_res_and_lock(lock);
if ((lock->l_req_mode == lock->l_granted_mode) &&
- !ldlm_is_cp_reqd(lock))
+ !ldlm_is_cp_reqd(lock))
ret = 1;
else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
@@ -326,13 +326,13 @@ void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
ldlm_wire_policy_data_t *wpolicy);
void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
- ldlm_wire_policy_data_t *wpolicy);
+ ldlm_wire_policy_data_t *wpolicy);
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
- ldlm_policy_data_t *lpolicy);
+ ldlm_policy_data_t *lpolicy);
void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
ldlm_wire_policy_data_t *wpolicy);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7c832aae7d5e..153e990c494e 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -82,7 +82,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
if (priority) {
list_del(&item->oic_item);
list_add(&item->oic_item,
- &imp->imp_conn_list);
+ &imp->imp_conn_list);
item->oic_last_attempt = 0;
}
CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
@@ -102,7 +102,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
list_add(&imp_conn->oic_item, &imp->imp_conn_list);
else
list_add_tail(&imp_conn->oic_item,
- &imp->imp_conn_list);
+ &imp->imp_conn_list);
CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? "head" : "tail"));
@@ -299,12 +299,14 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
sizeof(server_uuid)));
- cli->cl_dirty = 0;
+ cli->cl_dirty_pages = 0;
cli->cl_avail_grant = 0;
- /* FIXME: Should limit this for the sum of all cl_dirty_max. */
- cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
- cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
+ /* FIXME: Should limit this for the sum of all cl_dirty_max_pages. */
+ /*
+ * cl_dirty_max_pages may be changed at connect time in
+ * ptlrpc_connect_interpret().
+ */
+ client_adjust_max_dirty(cli);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -326,11 +328,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
/* lru for osc. */
INIT_LIST_HEAD(&cli->cl_lru_osc);
atomic_set(&cli->cl_lru_shrinkers, 0);
- atomic_set(&cli->cl_lru_busy, 0);
- atomic_set(&cli->cl_lru_in_list, 0);
+ atomic_long_set(&cli->cl_lru_busy, 0);
+ atomic_long_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
spin_lock_init(&cli->cl_lru_list_lock);
- atomic_set(&cli->cl_unstable_count, 0);
+ atomic_long_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -360,7 +362,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_chunkbits = PAGE_SHIFT;
if (!strcmp(name, LUSTRE_MDC_NAME)) {
- cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
+ cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
@@ -368,7 +370,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
- cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;
+ cli->cl_max_rpcs_in_flight = OBD_MAX_RIF_DEFAULT;
}
rc = ldlm_get_ref();
if (rc) {
@@ -534,7 +536,7 @@ int client_disconnect_export(struct obd_export *exp)
imp = cli->cl_import;
down_write(&cli->cl_sem);
- CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
+ CDEBUG(D_INFO, "disconnect %s - %zu\n", obd->obd_name,
cli->cl_conn_count);
if (!cli->cl_conn_count) {
@@ -690,7 +692,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
if (rs->rs_transno > exp->exp_last_committed) {
/* not committed already */
list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
+ &exp->exp_uncommitted_replies);
}
spin_unlock(&exp->exp_uncommitted_replies_lock);
@@ -795,7 +797,7 @@ void ldlm_dump_export_locks(struct obd_export *exp)
CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
exp);
list_for_each_entry(lock, &exp->exp_locks_list,
- l_exp_refs_link)
+ l_exp_refs_link)
LDLM_ERROR(lock, "lock:");
}
spin_unlock(&exp->exp_locks_list_guard);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index a5993f745ebe..3c48b4fb96f1 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -481,8 +481,8 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
unlock_res_and_lock(lock);
newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (!newres)
- return -ENOMEM;
+ if (IS_ERR(newres))
+ return PTR_ERR(newres);
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
@@ -542,7 +542,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(handle);
- lock = class_handle2object(handle->cookie);
+ lock = class_handle2object(handle->cookie, NULL);
if (!lock)
return NULL;
@@ -937,7 +937,7 @@ static void search_granted_lock(struct list_head *queue,
/* go to next policy group within mode group */
tmp = policy_end->l_res_link.next;
lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ l_res_link);
} /* loop over policy groups within the mode group */
/* insert point is last lock of the mode group,
@@ -1028,15 +1028,28 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
check_res_locked(res);
lock->l_granted_mode = lock->l_req_mode;
+
+ if (work_list && lock->l_completion_ast)
+ ldlm_add_ast_work_item(lock, NULL, work_list);
+
if (res->lr_type == LDLM_PLAIN || res->lr_type == LDLM_IBITS)
ldlm_grant_lock_with_skiplist(lock);
else if (res->lr_type == LDLM_EXTENT)
ldlm_extent_add_lock(res, lock);
- else
+ else if (res->lr_type == LDLM_FLOCK) {
+ /*
+ * We should not add locks to the granted list in the
+ * following cases:
+ * - this is an UNLOCK but not a real lock;
+ * - this is a TEST lock;
+ * - this is an F_CANCELLK lock (async flock has req_mode == 0);
+ * - this is a deadlock (flock cannot be granted).
+ */
+ if (!lock->l_req_mode || lock->l_req_mode == LCK_NL ||
+ ldlm_is_test_lock(lock) || ldlm_is_flock_deadlock(lock))
+ return;
ldlm_resource_add_lock(res, &res->lr_granted, lock);
-
- if (work_list && lock->l_completion_ast)
- ldlm_add_ast_work_item(lock, NULL, work_list);
+ } else
+ LBUG();
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
}
@@ -1103,7 +1116,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* of bits.
*/
if (lock->l_resource->lr_type == LDLM_IBITS &&
- ((lock->l_policy_data.l_inodebits.bits &
+ ((lock->l_policy_data.l_inodebits.bits &
policy->l_inodebits.bits) !=
policy->l_inodebits.bits))
continue;
@@ -1214,7 +1227,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
}
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (!res) {
+ if (IS_ERR(res)) {
LASSERT(!old_lock);
return 0;
}
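
This hunk, like several below, adjusts callers for allocators that now return
ERR_PTR() codes instead of NULL, so the real errno from ldlm_resource_get()
or ldlm_lock_create() can be propagated. A generic sketch of the
<linux/err.h> idiom, with 'foo' and its allocator as stand-ins:

	#include <linux/err.h>

	static struct foo *foo_get(bool ok)
	{
		if (!ok)
			return ERR_PTR(-ENOENT);	/* was: return NULL */
		return alloc_foo();		/* hypothetical allocator */
	}

	static int foo_user(void)
	{
		struct foo *f = foo_get(false);

		if (IS_ERR(f))
			return PTR_ERR(f);	/* propagate the encoded errno */
		/* ... use f ... */
		return 0;
	}
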
@@ -1363,12 +1376,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (size == sizeof(struct ost_lvb)) {
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
else
lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb);
if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
@@ -1380,8 +1393,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_ost_lvb_v1);
+ &RMF_DLM_LVB,
+ lustre_swab_ost_lvb_v1);
else
lvb = req_capsule_server_sized_swab_get(pill,
&RMF_DLM_LVB, size,
@@ -1405,12 +1418,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
if (size == sizeof(struct lquota_lvb)) {
if (loc == RCL_CLIENT)
lvb = req_capsule_client_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
else
lvb = req_capsule_server_swab_get(pill,
- &RMF_DLM_LVB,
- lustre_swab_lquota_lvb);
+ &RMF_DLM_LVB,
+ lustre_swab_lquota_lvb);
if (unlikely(!lvb)) {
LDLM_ERROR(lock, "no LVB");
return -EPROTO;
@@ -1462,15 +1475,15 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
{
struct ldlm_lock *lock;
struct ldlm_resource *res;
+ int rc;
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (!res)
- return NULL;
+ if (IS_ERR(res))
+ return ERR_CAST(res);
lock = ldlm_lock_new(res);
-
if (!lock)
- return NULL;
+ return ERR_PTR(-ENOMEM);
lock->l_req_mode = mode;
lock->l_ast_data = data;
@@ -1484,27 +1497,33 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
lock->l_tree_node = NULL;
/* if this is the extent lock, allocate the interval tree node */
if (type == LDLM_EXTENT) {
- if (!ldlm_interval_alloc(lock))
+ if (!ldlm_interval_alloc(lock)) {
+ rc = -ENOMEM;
goto out;
+ }
}
if (lvb_len) {
lock->l_lvb_len = lvb_len;
lock->l_lvb_data = kzalloc(lvb_len, GFP_NOFS);
- if (!lock->l_lvb_data)
+ if (!lock->l_lvb_data) {
+ rc = -ENOMEM;
goto out;
+ }
}
lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK)) {
+ rc = -ENOENT;
goto out;
+ }
return lock;
out:
ldlm_lock_destroy(lock);
LDLM_LOCK_RELEASE(lock);
- return NULL;
+ return ERR_PTR(rc);
}
/**
@@ -1522,16 +1541,13 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
struct ldlm_lock *lock = *lockp;
struct ldlm_resource *res = lock->l_resource;
- lock->l_last_activity = ktime_get_real_seconds();
-
lock_res_and_lock(lock);
if (lock->l_req_mode == lock->l_granted_mode) {
/* The server returned a blocked lock, but it was granted
* before we got a chance to actually enqueue it. We don't
* need to do anything else.
*/
- *flags &= ~(LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV | LDLM_FL_BLOCK_WAIT);
+ *flags &= ~LDLM_FL_BLOCKED_MASK;
goto out;
}
@@ -1546,6 +1562,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
*/
if (*flags & LDLM_FL_AST_DISCARD_DATA)
ldlm_set_ast_discard_data(lock);
+ if (*flags & LDLM_FL_TEST_LOCK)
+ ldlm_set_test_lock(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1688,7 +1706,7 @@ static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
return -ENOENT;
gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
- gl_list);
+ gl_list);
list_del_init(&gl_work->gl_list);
lock = gl_work->gl_lock;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 821939ff2e6b..fde697ebaadc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -559,8 +559,11 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET)) {
+ if (cfs_fail_err)
+ ldlm_callback_reply(req, -(int)cfs_fail_err);
return 0;
+ }
break;
case LDLM_CP_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
@@ -706,12 +709,12 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
if (!list_empty(&blp->blp_list) &&
(list_empty(&blp->blp_prio_list) || num_bl == 0))
blwi = list_entry(blp->blp_list.next,
- struct ldlm_bl_work_item, blwi_entry);
+ struct ldlm_bl_work_item, blwi_entry);
else
if (!list_empty(&blp->blp_prio_list))
blwi = list_entry(blp->blp_prio_list.next,
- struct ldlm_bl_work_item,
- blwi_entry);
+ struct ldlm_bl_work_item,
+ blwi_entry);
if (blwi) {
if (++num_bl >= atomic_read(&blp->blp_num_threads))
@@ -741,7 +744,7 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
init_completion(&bltd.bltd_comp);
bltd.bltd_num = atomic_read(&blp->blp_num_threads);
snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
- "ldlm_bl_%02d", bltd.bltd_num);
+ "ldlm_bl_%02d", bltd.bltd_num);
task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
if (IS_ERR(task)) {
CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
@@ -786,8 +789,8 @@ static int ldlm_bl_thread_main(void *arg)
if (!blwi) {
atomic_dec(&blp->blp_busy_threads);
l_wait_event_exclusive(blp->blp_waitq,
- (blwi = ldlm_bl_get_work(blp)),
- &lwi);
+ (blwi = ldlm_bl_get_work(blp)),
+ &lwi);
busy = atomic_inc_return(&blp->blp_busy_threads);
} else {
busy = atomic_read(&blp->blp_busy_threads);
@@ -874,8 +877,6 @@ void ldlm_put_ref(void)
}
EXPORT_SYMBOL(ldlm_put_ref);
-extern unsigned int ldlm_cancel_unused_locks_before_replay;
-
static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
@@ -1094,16 +1095,17 @@ int ldlm_init(void)
return -ENOMEM;
ldlm_lock_slab = kmem_cache_create("ldlm_locks",
- sizeof(struct ldlm_lock), 0,
- SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
+ sizeof(struct ldlm_lock), 0,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_DESTROY_BY_RCU, NULL);
if (!ldlm_lock_slab) {
kmem_cache_destroy(ldlm_resource_slab);
return -ENOMEM;
}
ldlm_interval_slab = kmem_cache_create("interval_node",
- sizeof(struct ldlm_interval),
- 0, SLAB_HWCACHE_ALIGN, NULL);
+ sizeof(struct ldlm_interval),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!ldlm_interval_slab) {
kmem_cache_destroy(ldlm_resource_slab);
kmem_cache_destroy(ldlm_lock_slab);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index 657ed4012776..9a1136e32dfc 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -357,38 +357,40 @@ static int ldlm_pool_recalc(struct ldlm_pool *pl)
int count;
recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
- if (recalc_interval_sec <= 0)
- goto recalc;
-
- spin_lock(&pl->pl_lock);
if (recalc_interval_sec > 0) {
- /*
- * Update pool statistics every 1s.
- */
- ldlm_pool_recalc_stats(pl);
-
- /*
- * Zero out all rates and speed for the last period.
- */
- atomic_set(&pl->pl_grant_rate, 0);
- atomic_set(&pl->pl_cancel_rate, 0);
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
+
+ if (recalc_interval_sec > 0) {
+ /*
+ * Update pool statistics every 1s.
+ */
+ ldlm_pool_recalc_stats(pl);
+
+ /*
+ * Zero out all rates and speed for the last period.
+ */
+ atomic_set(&pl->pl_grant_rate, 0);
+ atomic_set(&pl->pl_cancel_rate, 0);
+ }
+ spin_unlock(&pl->pl_lock);
}
- spin_unlock(&pl->pl_lock);
- recalc:
if (pl->pl_ops->po_recalc) {
count = pl->pl_ops->po_recalc(pl);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
count);
}
+
recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
pl->pl_recalc_period;
if (recalc_interval_sec <= 0) {
+ /* DEBUG: should be re-removed after LU-4536 is fixed */
+ CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), too short period(%ld)\n",
+ pl->pl_name, (long)recalc_interval_sec,
+ (long)pl->pl_recalc_period);
+
/* Prevent too frequent recalculation. */
- CDEBUG(D_DLMTRACE,
- "Negative interval(%d), too short period(%lld)",
- recalc_interval_sec,
- (s64)pl->pl_recalc_period);
recalc_interval_sec = 1;
}
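
The restructured ldlm_pool_recalc() above now double-checks the interval:
once without the lock to skip the common no-op case cheaply, and again under
pl_lock because another thread may have recalculated in the meantime.
Condensed, the shape is:

	interval = ktime_get_seconds() - pl->pl_recalc_time;
	if (interval > 0) {			/* cheap, unlocked pre-check */
		spin_lock(&pl->pl_lock);
		interval = ktime_get_seconds() - pl->pl_recalc_time;
		if (interval > 0) {		/* still due: we won the race */
			ldlm_pool_recalc_stats(pl);
			atomic_set(&pl->pl_grant_rate, 0);
			atomic_set(&pl->pl_cancel_rate, 0);
		}
		spin_unlock(&pl->pl_lock);
	}
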
@@ -792,7 +794,8 @@ static struct completion ldlm_pools_comp;
*/
static unsigned long ldlm_pools_count(ldlm_side_t client, gfp_t gfp_mask)
{
- int total = 0, nr_ns;
+ unsigned long total = 0;
+ int nr_ns;
struct ldlm_namespace *ns;
struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
@@ -995,7 +998,7 @@ static int ldlm_pools_thread_main(void *arg)
wake_up(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- "ldlm_poold", current_pid());
+ "ldlm_poold", current_pid());
while (1) {
struct l_wait_info lwi;
@@ -1025,7 +1028,7 @@ static int ldlm_pools_thread_main(void *arg)
wake_up(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- "ldlm_poold", current_pid());
+ "ldlm_poold", current_pid());
complete_and_exit(&ldlm_pools_comp, 0);
}
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index af487f9937f4..35ba6f14d95f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -63,8 +63,8 @@
#include "ldlm_internal.h"
-int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
-module_param(ldlm_enqueue_min, int, 0644);
+unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
+module_param(ldlm_enqueue_min, uint, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");
/* in client side, whether the cached locks will be canceled before replay */
@@ -123,44 +123,56 @@ static int ldlm_expired_completion_wait(void *data)
return 0;
}
+/**
+ * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
+ * lock cancel, and their replies). Used for lock completion timeout on the
+ * client side.
+ *
+ * \param[in] lock lock that is waiting for the completion callback
+ *
+ * \retval timeout in seconds to wait for the server reply
+ */
/* We use the same basis for both server side and client side functions
* from a single node.
*/
-static int ldlm_get_enq_timeout(struct ldlm_lock *lock)
+static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
{
- int timeout = at_get(ldlm_lock_to_ns_at(lock));
+ unsigned int timeout;
if (AT_OFF)
- return obd_timeout / 2;
- /* Since these are non-updating timeouts, we should be conservative.
- * It would be nice to have some kind of "early reply" mechanism for
- * lock callbacks too...
+ return obd_timeout;
+
+ /*
+ * Wait a long time for enqueue - server may have to callback a
+ * lock from another client. Server will evict the other client if it
+ * doesn't respond reasonably, and then give us the lock.
*/
- timeout = min_t(int, at_max, timeout + (timeout >> 1)); /* 150% */
- return max(timeout, ldlm_enqueue_min);
+ timeout = at_get(ldlm_lock_to_ns_at(lock));
+ return max(3 * timeout, ldlm_enqueue_min);
}
/**
* Helper function for ldlm_completion_ast(), updating timings when lock is
* actually granted.
*/
-static int ldlm_completion_tail(struct ldlm_lock *lock)
+static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
{
long delay;
- int result;
+ int result = 0;
if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
+ } else if (!data) {
+ LDLM_DEBUG(lock, "client-side enqueue: granted");
} else {
+ /* Update AT stats only for CP RPCs, not for immediately granted locks */
delay = ktime_get_real_seconds() - lock->l_last_activity;
LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
delay);
/* Update our time estimate */
- at_measured(ldlm_lock_to_ns_at(lock),
- delay);
- result = 0;
+ at_measured(ldlm_lock_to_ns_at(lock), delay);
}
return result;
}
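
With adaptive timeouts on, ldlm_cp_timeout() therefore waits three times the
measured estimate, floored at ldlm_enqueue_min. A worked example with
illustrative numbers (assuming OBD_TIMEOUT_DEFAULT's usual value of 100
seconds for the floor):

	unsigned int at_est = 25;	/* hypothetical at_get() estimate, s */
	unsigned int cp_timeout = max(3 * at_est, ldlm_enqueue_min);
	/* => max(75, 100) = 100 seconds */
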
@@ -177,10 +189,9 @@ int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
return 0;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
- return ldlm_completion_tail(lock);
+ return ldlm_completion_tail(lock, data);
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
@@ -224,8 +235,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
goto noreproc;
}
- if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
- LDLM_FL_BLOCK_CONV))) {
+ if (!(flags & LDLM_FL_BLOCKED_MASK)) {
wake_up(&lock->l_waitq);
return 0;
}
@@ -240,13 +250,10 @@ noreproc:
if (obd)
imp = obd->u.cli.cl_import;
- /* Wait a long time for enqueue - server may have to callback a
- * lock from another client. Server will evict the other client if it
- * doesn't respond reasonably, and then give us the lock.
- */
- timeout = ldlm_get_enq_timeout(lock) * 2;
+ timeout = ldlm_cp_timeout(lock);
lwd.lwd_lock = lock;
+ lock->l_last_activity = ktime_get_real_seconds();
if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
@@ -279,7 +286,7 @@ noreproc:
return rc;
}
- return ldlm_completion_tail(lock);
+ return ldlm_completion_tail(lock, data);
}
EXPORT_SYMBOL(ldlm_completion_ast);
@@ -309,8 +316,6 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
else
LDLM_DEBUG(lock, "lock was granted or failed in race");
- ldlm_lock_decref_internal(lock, mode);
-
/* XXX - HACK because we shouldn't call ldlm_lock_destroy()
* from llite/file.c/ll_file_flock().
*/
@@ -321,9 +326,14 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
*/
if (lock->l_resource->lr_type == LDLM_FLOCK) {
lock_res_and_lock(lock);
- ldlm_resource_unlink_lock(lock);
- ldlm_lock_destroy_nolock(lock);
+ if (!ldlm_is_destroyed(lock)) {
+ ldlm_resource_unlink_lock(lock);
+ ldlm_lock_decref_internal_nolock(lock, mode);
+ ldlm_lock_destroy_nolock(lock);
+ }
unlock_res_and_lock(lock);
+ } else {
+ ldlm_lock_decref_internal(lock, mode);
}
}
@@ -418,11 +428,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
LDLM_FL_INHERIT_MASK);
- /* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
- * to wait with no timeout as well
- */
- lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_FL_NO_TIMEOUT);
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
@@ -556,7 +561,7 @@ static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
enum req_location loc,
int off)
{
- int size = req_capsule_msg_size(pill, loc);
+ u32 size = req_capsule_msg_size(pill, loc);
return ldlm_req_handles_avail(size, off);
}
@@ -565,7 +570,7 @@ static inline int ldlm_format_handles_avail(struct obd_import *imp,
const struct req_format *fmt,
enum req_location loc, int off)
{
- int size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
+ u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);
return ldlm_req_handles_avail(size, off);
}
@@ -696,8 +701,8 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (!lock)
- return -ENOMEM;
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
@@ -719,6 +724,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
lock->l_export = NULL;
lock->l_blocking_ast = einfo->ei_cb_bl;
lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
+ lock->l_last_activity = ktime_get_real_seconds();
/* lock not sent to server yet */
@@ -819,7 +825,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
lock_res_and_lock(lock);
ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
- (LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
+ (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
@@ -1180,8 +1186,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
- lock->l_last_used));
+ la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
lv = lvf * la * unused;
/* Inform pool about current CLV to see it via debugfs. */
@@ -1193,9 +1198,6 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
if (slv == 0 || lv < slv)
return LDLM_POLICY_KEEP_LOCK;
- if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
- return LDLM_POLICY_KEEP_LOCK;
-
return LDLM_POLICY_CANCEL_LOCK;
}
@@ -1239,9 +1241,6 @@ static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
cfs_time_add(lock->l_last_used, ns->ns_max_age)))
return LDLM_POLICY_KEEP_LOCK;
- if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
- return LDLM_POLICY_KEEP_LOCK;
-
return LDLM_POLICY_CANCEL_LOCK;
}
@@ -1374,7 +1373,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
break;
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
- l_lru) {
+ l_lru) {
/* No locks which got blocking requests. */
LASSERT(!ldlm_is_bl_ast(lock));
@@ -1413,7 +1412,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* That is, for shrinker policy we drop only
* old locks, but additionally choose them by
* their weight. Big extent locks will stay in
- * the cache. */
+ * the cache.
+ */
result = pf(ns, lock, unused, added, count);
if (result == LDLM_POLICY_KEEP_LOCK) {
lu_ref_del(&lock->l_reference,
@@ -1610,8 +1610,7 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
*/
while (count > 0) {
LASSERT(!list_empty(cancels));
- lock = list_entry(cancels->next, struct ldlm_lock,
- l_bl_ast);
+ lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
LASSERT(lock->l_conn_export);
if (exp_connect_cancelset(lock->l_conn_export)) {
@@ -1660,7 +1659,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
int rc;
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (!res) {
+ if (IS_ERR(res)) {
/* This is not a problem. */
CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
return 0;
@@ -1704,7 +1703,8 @@ static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
* that have 0 readers/writers.
*
* If flags & LCF_LOCAL, throw the locks away without trying
- * to notify the server. */
+ * to notify the server.
+ */
int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
const struct ldlm_res_id *res_id,
enum ldlm_cancel_flags flags, void *opaque)
@@ -1811,13 +1811,10 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
struct ldlm_resource *res;
int rc;
- if (!ns) {
- CERROR("must pass in namespace\n");
- LBUG();
- }
+ LASSERTF(ns, "must pass in namespace\n");
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (!res)
+ if (IS_ERR(res))
return 0;
LDLM_RESOURCE_ADDREF(res);
@@ -1843,7 +1840,7 @@ static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
* bug 17614: locks being actively cancelled. Get a reference
* on a lock so that it does not disappear under us (e.g. due to cancel)
*/
- if (!(lock->l_flags & (LDLM_FL_FAILED|LDLM_FL_CANCELING))) {
+ if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
list_add(&lock->l_pending_chain, list);
LDLM_LOCK_GET(lock);
}
@@ -2013,7 +2010,7 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
+ canceled, ldlm_ns_name(ns));
}
int ldlm_replay_locks(struct obd_import *imp)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 51a28d96af39..a09c25aea698 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -449,8 +449,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
const void *key, unsigned mask)
{
const struct ldlm_res_id *id = key;
- unsigned val = 0;
- unsigned i;
+ unsigned int val = 0;
+ unsigned int i;
for (i = 0; i < RES_NAME_SIZE; i++)
val += id->name[i];
@@ -561,9 +561,9 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
struct ldlm_ns_hash_def {
enum ldlm_ns_type nsd_type;
/** hash bucket bits */
- unsigned nsd_bkt_bits;
+ unsigned int nsd_bkt_bits;
/** hash bits */
- unsigned nsd_all_bits;
+ unsigned int nsd_all_bits;
/** hash operations */
struct cfs_hash_ops *nsd_hops;
};
@@ -758,8 +758,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
*/
lock_res(res);
list_for_each(tmp, q) {
- lock = list_entry(tmp, struct ldlm_lock,
- l_res_link);
+ lock = list_entry(tmp, struct ldlm_lock, l_res_link);
if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
@@ -793,8 +792,14 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
*/
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
+ if (lock->l_flags & LDLM_FL_FAIL_LOC) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(4));
+ set_current_state(TASK_RUNNING);
+ }
if (lock->l_completion_ast)
- lock->l_completion_ast(lock, 0, NULL);
+ lock->l_completion_ast(lock, LDLM_FL_FAILED,
+ NULL);
LDLM_LOCK_RELEASE(lock);
continue;
}
@@ -875,7 +880,8 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
force_wait:
if (force)
- lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
+ lwi = LWI_TIMEOUT(msecs_to_jiffies(obd_timeout *
+ MSEC_PER_SEC) / 4, NULL, NULL);
rc = l_wait_event(ns->ns_waitq,
atomic_read(&ns->ns_bref) == 0, &lwi);
@@ -1082,10 +1088,11 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
int create)
{
struct hlist_node *hnode;
- struct ldlm_resource *res;
+ struct ldlm_resource *res = NULL;
struct cfs_hash_bd bd;
__u64 version;
int ns_refcount = 0;
+ int rc;
LASSERT(!parent);
LASSERT(ns->ns_rs_hash);
@@ -1095,31 +1102,20 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
if (hnode) {
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
- res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
- /* Synchronize with regard to resource creation. */
- if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- mutex_lock(&res->lr_lvb_mutex);
- mutex_unlock(&res->lr_lvb_mutex);
- }
-
- if (unlikely(res->lr_lvb_len < 0)) {
- ldlm_resource_putref(res);
- res = NULL;
- }
- return res;
+ goto lvbo_init;
}
version = cfs_hash_bd_version_get(&bd);
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
if (create == 0)
- return NULL;
+ return ERR_PTR(-ENOENT);
LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
"type: %d\n", type);
res = ldlm_resource_new();
if (!res)
- return NULL;
+ return ERR_PTR(-ENOMEM);
res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
res->lr_name = *name;
@@ -1137,7 +1133,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
/* We have taken lr_lvb_mutex. Drop it. */
mutex_unlock(&res->lr_lvb_mutex);
kmem_cache_free(ldlm_resource_slab, res);
-
+lvbo_init:
res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* Synchronize with regard to resource creation. */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
@@ -1146,8 +1142,9 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
}
if (unlikely(res->lr_lvb_len < 0)) {
+ rc = res->lr_lvb_len;
ldlm_resource_putref(res);
- res = NULL;
+ res = ERR_PTR(rc);
}
return res;
}
@@ -1158,8 +1155,6 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- int rc;
-
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
rc = ns->ns_lvbo->lvbo_init(res);
if (rc < 0) {
@@ -1169,7 +1164,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
res->lr_lvb_len = rc;
mutex_unlock(&res->lr_lvb_mutex);
ldlm_resource_putref(res);
- return NULL;
+ return ERR_PTR(rc);
}
}
@@ -1386,7 +1381,7 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
if (!list_empty(&res->lr_granted)) {
CDEBUG(level, "Granted locks (in reverse order):\n");
list_for_each_entry_reverse(lock, &res->lr_granted,
- l_res_link) {
+ l_res_link) {
LDLM_DEBUG_LIMIT(level, lock, "###");
if (!(level & D_CANTMASK) &&
++granted > ldlm_dump_granted_max) {