author    Bob Peterson <rpeterso@redhat.com>    2018-11-08 14:04:50 -0500
committer David Teigland <teigland@redhat.com>  2018-11-08 13:17:00 -0600
commit    216f0efd19b9cc32207934fd1b87a45f2c4c593e (patch)
tree      78c64859be77492003b792df6363ab931398bfa8 /fs/dlm
parent    9de30f3f7f4d31037cfbb7c787e1089c1944b3a7 (diff)
dlm: Don't swamp the CPU with callbacks queued during recovery
Before this patch, recovery delayed all callbacks onto a holding queue, and afterward requeued every one of them to the callback work queue in a single pass. This patch keeps that behavior, but after queuing each batch of 25 it pauses via cond_resched(), so requeuing the backlog cannot swamp the CPU at the expense of other RT processes such as corosync.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/ast.c  10
1 file changed, 10 insertions, 0 deletions
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c3edff..47ee66d70109 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 		flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
 
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
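
The pattern this patch introduces, draining a backlog in fixed-size batches and yielding the CPU between batches, is easy to study outside the kernel. Below is a minimal userspace C sketch of the same idea; it is an illustration, not DLM code. The names (work_item, dispatch, drain_in_batches) are hypothetical, and POSIX sched_yield() stands in for the kernel's cond_resched().

#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CB_QUEUE 25	/* same batch size the patch chooses */

struct work_item {
	int id;
	struct work_item *next;
};

/* Hypothetical stand-in for queue_work(): hand one callback to a worker. */
static void dispatch(struct work_item *w)
{
	printf("dispatching callback %d\n", w->id);
}

/*
 * Drain the backlog in batches of MAX_CB_QUEUE, yielding the CPU
 * between batches the way the patch calls cond_resched(), so a long
 * backlog cannot monopolize the CPU.
 */
static void drain_in_batches(struct work_item **head)
{
	int count;

	do {
		count = 0;
		while (*head && count < MAX_CB_QUEUE) {
			struct work_item *w = *head;

			*head = w->next;
			dispatch(w);
			free(w);
			count++;
		}
		if (count == MAX_CB_QUEUE)
			sched_yield();	/* let other runnable tasks in */
	} while (count == MAX_CB_QUEUE);
}

int main(void)
{
	struct work_item *head = NULL;

	/* Build a backlog of 60 items, as recovery might accumulate. */
	for (int i = 60; i > 0; i--) {
		struct work_item *w = malloc(sizeof(*w));

		if (!w)
			return 1;
		w->id = i;
		w->next = head;
		head = w;
	}
	drain_in_batches(&head);
	return 0;
}

As in the patch, when the backlog is an exact multiple of the batch size the loop makes one final empty pass: count stays 0, the termination test fails, and the function returns, so the extra round trip is harmless.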