author	Ursula Braun <ubraun@linux.ibm.com>	2020-02-17 16:24:55 +0100
committer	David S. Miller <davem@davemloft.net>	2020-02-17 14:50:24 -0800
commit	5613f20c938102093df2c67c7c22d7d0b0d3e3fb (patch)
tree	85e6feb574608404e11185ba606b7966ecdcac4e /net/smc/smc_ib.c
parent	5f78fe968d76902944534db85c4fb244dedc87f4 (diff)
net/smc: reduce port_event scheduling
IB event handlers schedule the port event worker for further processing
of port state changes. This patch reduces the number of schedules to
avoid duplicate processing of the same port change.

Reviewed-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
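The deduplication hinges on the return value of the kernel's atomic bitops:
test_and_set_bit() sets a bit and returns its previous value, so only the
event that actually flips a port's ports_going_away bit schedules the worker,
and repeated error events for an already-failing port become no-ops. The
userspace sketch below illustrates the idiom by emulating the bitop with C11
atomics; emulated_test_and_set_bit(), schedule_port_event_work() and
port_error() are hypothetical stand-ins for illustration, not kernel code.

/* Minimal userspace sketch (not the kernel code) of the
 * "schedule only on a state transition" idiom used by this patch.
 * emulated_test_and_set_bit() mimics the kernel's test_and_set_bit();
 * schedule_port_event_work() stands in for
 * schedule_work(&smcibdev->port_event_work).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_ulong ports_going_away;

/* Set bit nr and return its previous value, like the kernel helper. */
static bool emulated_test_and_set_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_or(addr, mask) & mask;
}

static void schedule_port_event_work(void)
{
	puts("port event worker scheduled");
}

/* Models the IB_EVENT_PORT_ERR arm: schedule the worker only when the
 * port was not already marked as going away. */
static void port_error(int port_idx)
{
	if (!emulated_test_and_set_bit(port_idx, &ports_going_away))
		schedule_port_event_work();
}

int main(void)
{
	port_error(0);	/* first error: worker scheduled */
	port_error(0);	/* duplicate event: no second schedule */
	return 0;
}

The same pattern appears three times in the patch: set-and-schedule for port
and QP errors, clear-and-schedule for IB_EVENT_PORT_ACTIVE, and an
unconditional schedule for IB_EVENT_GID_CHANGE.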
Diffstat (limited to 'net/smc/smc_ib.c')
-rw-r--r--	net/smc/smc_ib.c	44
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 548632621f4b..6756bd5a3fe4 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -257,6 +257,7 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 					struct ib_event *ibevent)
 {
 	struct smc_ib_device *smcibdev;
+	bool schedule = false;
 	u8 port_idx;
 
 	smcibdev = container_of(handler, struct smc_ib_device, event_handler);
@@ -266,22 +267,35 @@ static void smc_ib_global_event_handler(struct ib_event_handler *handler,
 		/* terminate all ports on device */
 		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
 			set_bit(port_idx, &smcibdev->port_event_mask);
-			set_bit(port_idx, smcibdev->ports_going_away);
+			if (!test_and_set_bit(port_idx,
+					      smcibdev->ports_going_away))
+				schedule = true;
 		}
-		schedule_work(&smcibdev->port_event_work);
+		if (schedule)
+			schedule_work(&smcibdev->port_event_work);
 		break;
-	case IB_EVENT_PORT_ERR:
 	case IB_EVENT_PORT_ACTIVE:
-	case IB_EVENT_GID_CHANGE:
 		port_idx = ibevent->element.port_num - 1;
-		if (port_idx < SMC_MAX_PORTS) {
-			set_bit(port_idx, &smcibdev->port_event_mask);
-			if (ibevent->event == IB_EVENT_PORT_ERR)
-				set_bit(port_idx, smcibdev->ports_going_away);
-			else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
-				clear_bit(port_idx, smcibdev->ports_going_away);
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
+			schedule_work(&smcibdev->port_event_work);
+		break;
+	case IB_EVENT_PORT_ERR:
+		port_idx = ibevent->element.port_num - 1;
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
 			schedule_work(&smcibdev->port_event_work);
-		}
+		break;
+	case IB_EVENT_GID_CHANGE:
+		port_idx = ibevent->element.port_num - 1;
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		schedule_work(&smcibdev->port_event_work);
 		break;
 	default:
 		break;
@@ -316,11 +330,11 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
 	case IB_EVENT_QP_FATAL:
 	case IB_EVENT_QP_ACCESS_ERR:
 		port_idx = ibevent->element.qp->port - 1;
-		if (port_idx < SMC_MAX_PORTS) {
-			set_bit(port_idx, &smcibdev->port_event_mask);
-			set_bit(port_idx, smcibdev->ports_going_away);
+		if (port_idx >= SMC_MAX_PORTS)
+			break;
+		set_bit(port_idx, &smcibdev->port_event_mask);
+		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
 			schedule_work(&smcibdev->port_event_work);
-		}
 		break;
 	default:
 		break;
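For the pattern to be safe, the worker on the other end has to drain
port_event_mask so that one schedule can cover several flagged ports; the
port_event_work handler itself is outside this hunk. Below is a plausible
userspace sketch of such a drain loop under that assumption, again emulating
the kernel bitop with C11 atomics; emulated_test_and_clear_bit() and
handle_port_change() are hypothetical stand-ins, not the actual handler.

/* Userspace sketch of a worker-side drain loop (assumption: the real
 * port_event_work handler processes and clears port_event_mask; it is
 * not part of this diff). emulated_test_and_clear_bit() mimics the
 * kernel's test_and_clear_bit(); handle_port_change() is a hypothetical
 * stand-in for the per-port processing.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define SMC_MAX_PORTS 2

static atomic_ulong port_event_mask;

/* Clear bit nr and return its previous value, like the kernel helper. */
static bool emulated_test_and_clear_bit(int nr, atomic_ulong *addr)
{
	unsigned long mask = 1UL << nr;

	return atomic_fetch_and(addr, ~mask) & mask;
}

static void handle_port_change(int port_idx)
{
	printf("processing port %d\n", port_idx + 1);
}

/* One scheduled run can cover every port flagged since the last run. */
static void port_event_work(void)
{
	for (int port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++)
		if (emulated_test_and_clear_bit(port_idx, &port_event_mask))
			handle_port_change(port_idx);
}

int main(void)
{
	atomic_fetch_or(&port_event_mask, 1UL << 1);	/* flag port 2 */
	port_event_work();	/* prints "processing port 2" */
	port_event_work();	/* mask already drained: nothing to do */
	return 0;
}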