author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2021-11-26 17:15:29 +0100
committer  David S. Miller <davem@davemloft.net>	2021-11-29 12:12:36 +0000
commit     fd888e85fe6b661e78044dddfec0be5271afa626
tree       c1ef71d45fd053e110de5aeea2b0eb02b1e2c681 /net/hsr
parent     07b8ca3792dec6bc3288b08ff85d80b5330de1d6
net: Write lock dev_base_lock without disabling bottom halves.
The writer acquires dev_base_lock with bottom halves disabled. The reader can acquire dev_base_lock without disabling bottom halves because there is no writer in softirq context.

On PREEMPT_RT the softirqs are preemptible and local_bh_disable() acts as a lock to ensure that resources protected by disabling bottom halves remain protected. This leads to a circular locking dependency if the lock is acquired with bottom halves disabled (as in write_lock_bh()) and somewhere else with bottom halves enabled (as by read_lock() in netstat_show()) followed by disabling bottom halves (cxgb_get_stats() -> t4_wr_mbox_meat_timeout() -> spin_lock_bh()). This is the reverse locking order.

All read_lock() invocations are from sysfs callbacks, which are not invoked from softirq context. Therefore there is no need to disable bottom halves while acquiring the write lock.

Acquire the write lock of dev_base_lock without disabling bottom halves.

Reported-by: Pei Zhang <pezhang@redhat.com>
Reported-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
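To make the reverse lock ordering concrete, here is a minimal sketch of the two code paths described above. The locks (base_lock, mbox_lock) and functions (writer_path(), reader_path()) are hypothetical stand-ins introduced for illustration, not kernel code; only the locking primitives (write_lock_bh(), read_lock(), spin_lock_bh()) are the real kernel APIs named in the message.

#include <linux/spinlock.h>

/* Hypothetical locks standing in for dev_base_lock and the cxgb mailbox lock. */
static DEFINE_RWLOCK(base_lock);
static DEFINE_SPINLOCK(mbox_lock);

/* Writer path, as __hsr_set_operstate() did before this patch:
 * write_lock_bh() is local_bh_disable() + write_lock(), so on PREEMPT_RT
 * the order is: BH "lock" first, then base_lock.
 */
static void writer_path(void)
{
	write_lock_bh(&base_lock);
	/* ... update state ... */
	write_unlock_bh(&base_lock);
}

/* Reader path, as netstat_show() -> cxgb_get_stats() ->
 * t4_wr_mbox_meat_timeout(): base_lock first, then the BH "lock" taken
 * inside spin_lock_bh(). This is the reverse of the writer's order.
 */
static void reader_path(void)
{
	read_lock(&base_lock);
	spin_lock_bh(&mbox_lock);	/* local_bh_disable() while holding base_lock */
	/* ... mailbox access ... */
	spin_unlock_bh(&mbox_lock);
	read_unlock(&base_lock);
}

After this patch the writer takes plain write_lock(), so it no longer nests base_lock inside the BH "lock" and the reverse ordering disappears; per the message above, this is safe because none of the dev_base_lock readers or writers run in softirq context.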
Diffstat (limited to 'net/hsr')
-rw-r--r--	net/hsr/hsr_device.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 737e4f17e1c6..e57fdad9ef94 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -30,13 +30,13 @@ static bool is_slave_up(struct net_device *dev)
 
 static void __hsr_set_operstate(struct net_device *dev, int transition)
 {
-	write_lock_bh(&dev_base_lock);
+	write_lock(&dev_base_lock);
 	if (dev->operstate != transition) {
 		dev->operstate = transition;
-		write_unlock_bh(&dev_base_lock);
+		write_unlock(&dev_base_lock);
 		netdev_state_change(dev);
 	} else {
-		write_unlock_bh(&dev_base_lock);
+		write_unlock(&dev_base_lock);
 	}
 }
 