author    Eelco Chaudron <echaudro@redhat.com>    2020-07-15 14:09:28 +0200
committer David S. Miller <davem@davemloft.net>   2020-07-17 10:36:50 -0700
commit    eac87c413bf9794c14d488998a5265ea5b32f04e (patch)
tree      969f981073d246b9888cc3501d883f6c1f6ebe93 /net/openvswitch/datapath.c
parent    b18432c5a49c9413fd3afb717b378e08cb71331b (diff)
net: openvswitch: reorder masks array based on usage
This patch reorders the masks array every 4 seconds based on each mask's usage count. This greatly reduces the number of masks checked per packet and hence improves overall performance, especially in the OVS/OVN case for OpenShift.

Here are some results from the OVS/OVN OpenShift test, which uses 8 pods, each pod having 512 uperf connections, and each connection sending a 64-byte request and receiving a 1024-byte response (TCP). All uperf clients are on one worker node while all uperf servers are on the other worker node.

  Kernel without this patch:      7.71 Gbps
  Kernel with this patch applied: 14.52 Gbps

We also ran tests to verify that the rebalance activity does not lower the flow insertion rate; it does not.

Signed-off-by: Eelco Chaudron <echaudro@redhat.com>
Tested-by: Andrew Theurer <atheurer@redhat.com>
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
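The reordering itself is done by ovs_flow_masks_rebalance(), which lives in the flow table code and is only called from this file (see the diff below). As a rough, hypothetical sketch of the idea only, in user-space C with made-up types and qsort() standing in for the kernel's actual implementation, rebalancing amounts to sorting the masks array by per-mask hit count and resetting the counters for the next 4-second interval:

/* Hypothetical user-space sketch of usage-based mask reordering.
 * Types and names are illustrative; this is not the kernel's
 * ovs_flow_masks_rebalance() implementation.
 */
#include <stdlib.h>

struct mask_entry {
	void *mask;                 /* the flow mask itself */
	unsigned long usage_count;  /* packet hits since the last rebalance */
};

/* Sort descending by usage count so the hottest masks come first. */
static int cmp_usage_desc(const void *a, const void *b)
{
	const struct mask_entry *ma = a, *mb = b;

	if (mb->usage_count > ma->usage_count)
		return 1;
	if (mb->usage_count < ma->usage_count)
		return -1;
	return 0;
}

/* Reorder the masks array so the per-packet linear lookup tries the
 * most frequently hit masks first, then reset the counters for the
 * next interval.
 */
static void masks_rebalance(struct mask_entry *masks, size_t n)
{
	qsort(masks, n, sizeof(masks[0]), cmp_usage_desc);
	while (n--)
		masks[n].usage_count = 0;
}

Keeping the most frequently hit masks at the front means the per-packet linear walk over the masks array usually terminates after only a few entries, which is where the throughput gain above comes from.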
Diffstat (limited to 'net/openvswitch/datapath.c')
-rw-r--r--  net/openvswitch/datapath.c  22
1 file changed, 22 insertions, 0 deletions
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 94b024534987..95805f0e27bd 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -130,6 +130,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
 				  const struct dp_upcall_info *,
 				  uint32_t cutlen);
 
+static void ovs_dp_masks_rebalance(struct work_struct *work);
+
 /* Must be called with rcu_read_lock or ovs_mutex. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
@@ -1653,6 +1655,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 		goto err_destroy_reply;
 
 	ovs_dp_set_net(dp, sock_net(skb->sk));
+	INIT_DELAYED_WORK(&dp->masks_rebalance, ovs_dp_masks_rebalance);
 
 	/* Allocate table. */
 	err = ovs_flow_tbl_init(&dp->table);
@@ -1712,6 +1715,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
 	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
 	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
 
+	schedule_delayed_work(&dp->masks_rebalance,
+			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
+
 	ovs_unlock();
 
 	ovs_notify(&dp_datapath_genl_family, reply, info);
@@ -1756,6 +1762,9 @@ static void __dp_destroy(struct datapath *dp)
 
 	/* RCU destroy the flow table */
 	call_rcu(&dp->rcu, destroy_dp_rcu);
+
+	/* Cancel remaining work. */
+	cancel_delayed_work_sync(&dp->masks_rebalance);
 }
 
 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
@@ -2338,6 +2347,19 @@ out:
 	return skb->len;
 }
 
+static void ovs_dp_masks_rebalance(struct work_struct *work)
+{
+	struct datapath *dp = container_of(work, struct datapath,
+					   masks_rebalance.work);
+
+	ovs_lock();
+	ovs_flow_masks_rebalance(&dp->table);
+	ovs_unlock();
+
+	schedule_delayed_work(&dp->masks_rebalance,
+			      msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
+}
+
 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
 	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },