author    Bob Peterson <rpeterso@redhat.com>  2016-10-10 09:19:52 -0400
committer David Teigland <teigland@redhat.com>  2016-10-19 11:00:03 -0500
commit    d2fee58a3bb15b2b8f1eaff14aa3432cf0f35d8c (patch)
tree      f1df5567b2efcc956e5947d4256d0ee9763e0dfb /fs/dlm
parent    3735b4b9f1c102dcaf70241225339bdea4447dc8 (diff)
dlm: remove lock_sock to avoid scheduling while atomic
Before this patch, functions save_callbacks and restore_callbacks called
lock_sock and release_sock to prevent other processes from messing with the
struct sock while the callbacks were saved and restored. However, function
add_sock calls write_lock_bh prior to calling save_callbacks, which disables
preemption. So the call to lock_sock could sleep and try to schedule at a
point where scheduling is not allowed.

Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
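To make the problem concrete, here is a minimal, hypothetical sketch of the
nesting described above (the function broken_pattern is invented for
illustration and is not part of the dlm code): write_lock_bh() puts the task
in atomic context with bottom halves disabled, while lock_sock() may sleep
waiting for the socket owner, which is what produces the "scheduling while
atomic" splat.

#include <net/sock.h>

/* Hypothetical illustration of the broken nesting described in the
 * commit message; not the actual fs/dlm code.
 */
static void broken_pattern(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);	/* BHs off: atomic context */
	lock_sock(sk);				/* may sleep -> scheduling while atomic */
	/* ... save or restore sk callbacks here ... */
	release_sock(sk);
	write_unlock_bh(&sk->sk_callback_lock);
}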
Diffstat (limited to 'fs/dlm')
-rw-r--r--  fs/dlm/lowcomms.c  4
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 485494e5a28e..df680a26141b 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -519,24 +519,20 @@ out:
 /* Note: sk_callback_lock must be locked before calling this function. */
 static void save_callbacks(struct connection *con, struct sock *sk)
 {
-	lock_sock(sk);
 	con->orig_data_ready = sk->sk_data_ready;
 	con->orig_state_change = sk->sk_state_change;
 	con->orig_write_space = sk->sk_write_space;
 	con->orig_error_report = sk->sk_error_report;
-	release_sock(sk);
 }
 
 static void restore_callbacks(struct connection *con, struct sock *sk)
 {
 	write_lock_bh(&sk->sk_callback_lock);
-	lock_sock(sk);
 	sk->sk_user_data = NULL;
 	sk->sk_data_ready = con->orig_data_ready;
 	sk->sk_state_change = con->orig_state_change;
 	sk->sk_write_space = con->orig_write_space;
 	sk->sk_error_report = con->orig_error_report;
-	release_sock(sk);
 	write_unlock_bh(&sk->sk_callback_lock);
 }
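For context, a rough sketch of the caller shape that the comment on
save_callbacks assumes (example_add_sock is a simplified stand-in, not the
verbatim add_sock from fs/dlm/lowcomms.c): the callback pointers are swapped
while sk_callback_lock is already held for writing, so taking the socket lock
inside save_callbacks is both unnecessary and unsafe.

#include <net/sock.h>

/* Hypothetical caller shape: sk_callback_lock is write-locked around the
 * callback swap, which already serializes against the socket's callbacks,
 * and nothing in this region may sleep.
 */
static void example_add_sock(struct connection *con, struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);	/* atomic context */
	sk->sk_user_data = con;
	save_callbacks(con, sk);		/* must not sleep here */
	/* ... install the lowcomms callbacks ... */
	write_unlock_bh(&sk->sk_callback_lock);
}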