From 759f29b62fb9af5274e7f761f9f4cdfa7bb5a1f2 Mon Sep 17 00:00:00 2001
From: Tung Nguyen
Date: Thu, 28 Jun 2018 22:39:25 +0200
Subject: tipc: optimize function tipc_node_timeout()

In single-link usage, the function tipc_node_timeout() still iterates
over the whole link array to handle each link. Given that the maximum
number of bearers is 3, there are 2 redundant iterations with lock
grab/release. Since this function executes very frequently, it makes
sense to optimize it.

This commit adds a conditional check so that the loop exits once the
known number of configured links has been visited.

Acked-by: Ying Xue
Signed-off-by: Tung Nguyen
Signed-off-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6a44eb812baf..8972ca1c654c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -551,21 +551,23 @@ static void tipc_node_timeout(struct timer_list *t)
         struct tipc_node *n = from_timer(n, t, timer);
         struct tipc_link_entry *le;
         struct sk_buff_head xmitq;
+        int remains = n->link_cnt;
         int bearer_id;
         int rc = 0;
 
         __skb_queue_head_init(&xmitq);
 
-        for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+        for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
                 tipc_node_read_lock(n);
                 le = &n->links[bearer_id];
-                spin_lock_bh(&le->lock);
                 if (le->link) {
+                        spin_lock_bh(&le->lock);
                         /* Link tolerance may change asynchronously: */
                         tipc_node_calculate_timer(n, le->link);
                         rc = tipc_link_timeout(le->link, &xmitq);
+                        spin_unlock_bh(&le->lock);
+                        remains--;
                 }
-                spin_unlock_bh(&le->lock);
                 tipc_node_read_unlock(n);
                 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
                 if (rc & TIPC_LINK_DOWN_EVT)
--
cgit
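The early-exit pattern used in the patch above is easy to demonstrate outside
the kernel. The following minimal, self-contained user-space C sketch shows
the same idea: iterate a fixed-size slot array, but stop as soon as the known
number of populated slots has been handled. The struct, names and counts here
are invented for illustration and are not TIPC code.

#include <stdio.h>

#define MAX_SLOTS 3

struct entry {
        int in_use;             /* non-zero if this slot holds a configured item */
        const char *name;
};

/* Visit every configured entry, but stop as soon as the known number of
 * configured entries has been handled, instead of always scanning all
 * MAX_SLOTS slots.
 */
static void visit_configured(struct entry *slots, int configured_cnt)
{
        int remains = configured_cnt;
        int i;

        for (i = 0; remains && (i < MAX_SLOTS); i++) {
                if (!slots[i].in_use)
                        continue;
                printf("handling %s\n", slots[i].name);
                remains--;      /* one fewer configured entry left to find */
        }
}

int main(void)
{
        struct entry slots[MAX_SLOTS] = {
                { 1, "link0" }, /* single-link case: only slot 0 is used */
                { 0, NULL },
                { 0, NULL },
        };

        visit_configured(slots, 1);     /* loop exits right after slot 0 */
        return 0;
}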
From 6a939f365bdb03a74b4617bdb4402fc08da088b9 Mon Sep 17 00:00:00 2001
From: GhantaKrishnamurthy MohanKrishna
Date: Fri, 29 Jun 2018 13:23:41 +0200
Subject: tipc: Auto removal of peer down node instance

A peer node is considered down if it has no active links, or if contact
with it has been lost. In the current implementation, a peer node
instance is deleted only if a) the TIPC module is removed, or b) the
application uses the netlink/iproute2 interface to delete the specific
down node. Thus, a down node instance lives in the system forever unless
the application explicitly removes it.

We fix this by deleting nodes that have been down for a specified amount
of time (5 minutes). The existing node supervision timer is used to
achieve this.

Acked-by: Ying Xue
Acked-by: Jon Maloy
Signed-off-by: GhantaKrishnamurthy MohanKrishna
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 55 insertions(+), 11 deletions(-)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8972ca1c654c..cfdbaf479fd1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -45,6 +45,7 @@
 #include "netlink.h"
 
 #define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000
 
 /* Flags used to take different actions according to flag type
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
@@ -96,6 +97,7 @@ struct tipc_bclink_entry {
  * @link_id: local and remote bearer ids of changing link, if any
  * @publ_list: list of publications
  * @rcu: rcu struct for tipc_node
+ * @delete_at: indicates the time for deleting a down node
  */
 struct tipc_node {
         u32 addr;
@@ -121,6 +123,7 @@ struct tipc_node {
         unsigned long keepalive_intv;
         struct timer_list timer;
         struct rcu_head rcu;
+        unsigned long delete_at;
 };
 
 /* Node FSM states and events:
@@ -160,6 +163,7 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
 static void tipc_node_put(struct tipc_node *node);
 static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
 
 struct tipc_sock_conn {
         u32 port;
@@ -390,6 +394,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
         for (i = 0; i < MAX_BEARERS; i++)
                 spin_lock_init(&n->links[i].lock);
         n->state = SELF_DOWN_PEER_LEAVING;
+        n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
         n->signature = INVALID_NODE_SIG;
         n->active_links[0] = INVALID_BEARER_ID;
         n->active_links[1] = INVALID_BEARER_ID;
@@ -433,11 +438,16 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
         tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
 }
 
-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
 {
         list_del_rcu(&node->list);
         hlist_del_rcu(&node->hash);
         tipc_node_put(node);
+}
+
+static void tipc_node_delete(struct tipc_node *node)
+{
+        tipc_node_delete_from_list(node);
 
         del_timer_sync(&node->timer);
         tipc_node_put(node);
@@ -544,6 +554,42 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
         tipc_node_put(node);
 }
 
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+        int i;
+
+        for (i = 0; i < MAX_BEARERS; i++) {
+                struct tipc_link_entry *le = &node->links[i];
+
+                if (le->link) {
+                        kfree(le->link);
+                        le->link = NULL;
+                        node->link_cnt--;
+                }
+        }
+}
+
+/* tipc_node_cleanup - delete nodes that does not
+ * have active links for NODE_CLEANUP_AFTER time
+ */
+static int tipc_node_cleanup(struct tipc_node *peer)
+{
+        struct tipc_net *tn = tipc_net(peer->net);
+        bool deleted = false;
+
+        spin_lock_bh(&tn->node_list_lock);
+        tipc_node_write_lock(peer);
+
+        if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+                tipc_node_clear_links(peer);
+                tipc_node_delete_from_list(peer);
+                deleted = true;
+        }
+        tipc_node_write_unlock(peer);
+        spin_unlock_bh(&tn->node_list_lock);
+        return deleted;
+}
+
 /* tipc_node_timeout - handle expiration of node timer
  */
 static void tipc_node_timeout(struct timer_list *t)
@@ -555,6 +601,12 @@ static void tipc_node_timeout(struct timer_list *t)
         int bearer_id;
         int rc = 0;
 
+        if (!node_is_up(n) && tipc_node_cleanup(n)) {
+                /*Removing the reference of Timer*/
+                tipc_node_put(n);
+                return;
+        }
+
         __skb_queue_head_init(&xmitq);
 
         for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
@@ -1173,6 +1225,7 @@ static void node_lost_contact(struct tipc_node *n,
         uint i;
 
         pr_debug("Lost contact with %x\n", n->addr);
+        n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
 
         /* Clean up broadcast state */
         tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1742,7 +1795,6 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
         struct tipc_node *peer;
         u32 addr;
         int err;
-        int i;
 
         /* We identify the peer by its net */
         if (!info->attrs[TIPC_NLA_NET])
@@ -1777,15 +1829,7 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
                 goto err_out;
         }
 
-        for (i = 0; i < MAX_BEARERS; i++) {
-                struct tipc_link_entry *le = &peer->links[i];
-
-                if (le->link) {
-                        kfree(le->link);
-                        le->link = NULL;
-                        peer->link_cnt--;
-                }
-        }
+        tipc_node_clear_links(peer);
 
         tipc_node_write_unlock(peer);
         tipc_node_delete(peer);
--
cgit
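The cleanup added above hinges on two things: stamping a deadline when a node
loses contact, and comparing that deadline against the current time in a way
that tolerates counter wrap-around. Below is a hedged user-space C sketch of
that logic; the time_after_ticks() helper mirrors the signed-subtraction trick
used by the kernel's time_after() macro, while the peer struct and tick values
are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "has the deadline passed?" check: the signed subtraction keeps
 * the comparison correct even after the tick counter has wrapped around.
 */
static bool time_after_ticks(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

struct peer {
        bool up;                  /* any active link? */
        unsigned long delete_at;  /* tick at which a down peer may be removed */
};

#define CLEANUP_AFTER_TICKS 300000UL

/* Called from a periodic supervision timer: returns true when the peer has
 * been down past its deadline and should be torn down now.
 */
static bool peer_cleanup_due(const struct peer *p, unsigned long now)
{
        return !p->up && time_after_ticks(now, p->delete_at);
}

int main(void)
{
        struct peer p = { .up = false, .delete_at = 1000 + CLEANUP_AFTER_TICKS };

        printf("%d\n", peer_cleanup_due(&p, 1000));                           /* 0: too early */
        printf("%d\n", peer_cleanup_due(&p, 1000 + CLEANUP_AFTER_TICKS + 1)); /* 1: overdue   */
        return 0;
}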
From 9012de5089560136b849b920ad038b96160ed8f6 Mon Sep 17 00:00:00 2001
From: Jon Maloy
Date: Tue, 10 Jul 2018 01:07:35 +0200
Subject: tipc: add sequence number check for link STATE messages

Some switch infrastructures produce huge amounts of packet duplicates.
This becomes a problem if those messages are STATE/NACK protocol
messages, causing unnecessary retransmissions of already accepted
packets.

We now introduce a unique sequence number per STATE protocol message so
that duplicates can be identified and ignored. This will also be useful
when tracing such cases, and to avert replay attacks when TIPC is
encrypted.

For compatibility reasons we have to introduce a new capability flag
TIPC_LINK_PROTO_SEQNO to handle this new feature.

Signed-off-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index cfdbaf479fd1..1cdb176798f7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -363,6 +363,8 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
 {
         struct tipc_net *tn = net_generic(net, tipc_net_id);
         struct tipc_node *n, *temp_node;
+        struct tipc_link *l;
+        int bearer_id;
         int i;
 
         spin_lock_bh(&tn->node_list_lock);
@@ -370,6 +372,11 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
         if (n) {
                 /* Same node may come back with new capabilities */
                 n->capabilities = capabilities;
+                for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+                        l = n->links[bearer_id].link;
+                        if (l)
+                                tipc_link_update_caps(l, capabilities);
+                }
                 goto exit;
         }
         n = kzalloc(sizeof(*n), GFP_ATOMIC);
--
cgit
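The duplicate filtering described above boils down to remembering the last
accepted sequence number per link and discarding anything that is not strictly
newer, using wrap-aware 16-bit arithmetic. The following stand-alone C sketch
illustrates that idea; it is not the TIPC implementation, and the struct and
function names are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-aware comparison of 16-bit sequence numbers: "a is newer than b"
 * when the signed 16-bit difference is positive.
 */
static bool seqno_newer(uint16_t a, uint16_t b)
{
        return (int16_t)(a - b) > 0;
}

struct proto_link {
        uint16_t last_state_seqno;      /* last accepted STATE message number */
};

/* Accept a STATE message only if its sequence number is newer than the last
 * accepted one; duplicates and replays are silently dropped.
 */
static bool accept_state_msg(struct proto_link *l, uint16_t seqno)
{
        if (!seqno_newer(seqno, l->last_state_seqno))
                return false;
        l->last_state_seqno = seqno;
        return true;
}

int main(void)
{
        struct proto_link l = { .last_state_seqno = 65530 };

        printf("%d\n", accept_state_msg(&l, 65531)); /* 1: newer             */
        printf("%d\n", accept_state_msg(&l, 65531)); /* 0: duplicate         */
        printf("%d\n", accept_state_msg(&l, 3));     /* 1: newer across wrap */
        return 0;
}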
From 7ea817f4e8322fa27fb860d15025bf72f68b179f Mon Sep 17 00:00:00 2001
From: Jon Maloy
Date: Tue, 10 Jul 2018 01:07:36 +0200
Subject: tipc: check session number before accepting link protocol messages

In some virtual environments we observe a significantly higher level of
packet reordering and delay than we have traditionally been used to.
This makes it necessary to apply stricter checks to the session number
of incoming link protocol messages, which until now has been validated
only for RESET messages. Since the other two message types, ACTIVATE and
STATE, also carry this number, it is easy to extend the validation check
to those messages.

We also introduce a flag indicating whether a link has a valid peer
session number or not. This eliminates the mixing of 32- and 16-bit
arithmetic we are currently using to achieve this.

Acked-by: Ying Xue
Signed-off-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 1cdb176798f7..52fd80b0e728 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1540,7 +1540,7 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
  * tipc_node_check_state - check and if necessary update node state
  * @skb: TIPC packet
  * @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * Returns true if state and msg are ok, otherwise false
  */
 static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                                   int bearer_id, struct sk_buff_head *xmitq)
@@ -1574,6 +1574,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                 }
         }
 
+        if (!tipc_link_validate_msg(l, hdr))
+                return false;
+
         /* Check and update node accesibility if applicable */
         if (state == SELF_UP_PEER_COMING) {
                 if (!tipc_link_is_up(l))
--
cgit

From 40999f11ce677ce3c5d0e8f5f76c40192a26b479 Mon Sep 17 00:00:00 2001
From: Jon Maloy
Date: Wed, 18 Jul 2018 19:50:06 +0200
Subject: tipc: make link capability update thread safe

The commit referred to below introduced an update of the link
capabilities field that is not safe. Given the recently added feature
to remove idle node and link items after 5 minutes, there is a small
risk that the update will happen at the very moment the targeted link
is being removed. To avoid this we have to perform the update inside
the node item's write lock protection.

Fixes: 9012de508956 ("tipc: add sequence number check for link STATE messages")
Signed-off-by: Jon Maloy
Signed-off-by: David S. Miller
---
 net/tipc/node.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'net/tipc/node.c')

diff --git a/net/tipc/node.c b/net/tipc/node.c
index 52fd80b0e728..3819ab14e073 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -370,13 +370,17 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
         spin_lock_bh(&tn->node_list_lock);
         n = tipc_node_find(net, addr);
         if (n) {
+                if (n->capabilities == capabilities)
+                        goto exit;
                 /* Same node may come back with new capabilities */
+                write_lock_bh(&n->lock);
                 n->capabilities = capabilities;
                 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
                         l = n->links[bearer_id].link;
                         if (l)
                                 tipc_link_update_caps(l, capabilities);
                 }
+                write_unlock_bh(&n->lock);
                 goto exit;
         }
         n = kzalloc(sizeof(*n), GFP_ATOMIC);
--
cgit
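The fix above follows a common pattern: skip the update when nothing has
changed, otherwise take the writer side of the node lock so that the per-link
update cannot race with concurrent link removal. Here is a hedged user-space C
sketch of that pattern using a pthread rwlock; the node struct and capability
fields are invented for illustration, and the unlocked equality pre-check
mirrors the patch, where the surrounding list lock is already held at that
point.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LINKS 3

struct node {
        pthread_rwlock_t lock;          /* protects capabilities and link_caps[] */
        uint16_t capabilities;
        uint16_t link_caps[MAX_LINKS];
        int link_cnt;
};

/* Re-apply capabilities for a node that reappeared: return early when nothing
 * changed, otherwise update the node and all of its links under the write
 * lock so that a concurrent link removal cannot race with the update.
 */
static void node_update_caps(struct node *n, uint16_t capabilities)
{
        int i;

        if (n->capabilities == capabilities)
                return;

        pthread_rwlock_wrlock(&n->lock);
        n->capabilities = capabilities;
        for (i = 0; i < n->link_cnt; i++)
                n->link_caps[i] = capabilities;
        pthread_rwlock_unlock(&n->lock);
}

int main(void)
{
        struct node n = { .capabilities = 0x1, .link_cnt = 2 };

        pthread_rwlock_init(&n.lock, NULL);
        node_update_caps(&n, 0x3);
        printf("node caps 0x%x, link0 caps 0x%x\n",
               (unsigned int)n.capabilities, (unsigned int)n.link_caps[0]);
        pthread_rwlock_destroy(&n.lock);
        return 0;
}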