author     Linus Torvalds <torvalds@linux-foundation.org>   2016-01-18 12:35:14 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-01-18 12:35:14 -0800
commit     48f58ba9cbffd3945fbc4d7f45b75cf4801a60b7 (patch)
tree       bf153ccf9ea834c39882ad1008f16bd26584ebfe /net
parent     7f36f1b2a8c4f55f8226ed6c8bb4ed6de11c4015 (diff)
parent     b064d0d88ae5280c7e878f79d0c9a8e2876a4d14 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull more networking fixes from David Miller:
1) Fix brcmfmac build with older gcc, from Arend van Spriel.
2) IRQ values unintentionally truncated to u8 in mlx5 driver, from
Doron Tsur.
3) Fix build warnings wrt tcp cgroup changes, from Geert Uytterhoeven.
4) Limit deep recursion in ovs stack, from Hannes Frederic Sowa.
5) at803x phy driver bug fixes, from Martin Blumenstingl.
6) Fix TSO handling in hns driver, from Daode Huang.
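For item 2 above, the underlying failure mode is plain implicit narrowing on assignment. The snippet below is only an illustration of that bug class, written as standalone C; every name in it is invented and it is not the actual mlx5 code:

#include <stdint.h>
#include <stdio.h>

/* Invented structure, for illustration only -- not the real mlx5 driver code. */
struct irq_slot {
        uint8_t irqn;   /* too narrow: cannot hold vector numbers >= 256 */
};

static void store_irq(struct irq_slot *slot, unsigned int vector)
{
        slot->irqn = vector;    /* implicit truncation: 260 silently becomes 4 */
}

int main(void)
{
        struct irq_slot slot;

        store_irq(&slot, 260);
        /* Prints 4 instead of 260 -- the high bits were dropped without warning. */
        printf("stored irq = %u\n", (unsigned int)slot.irqn);
        return 0;
}

Widening the too-small field or parameter to the caller's type is the usual cure for this class of bug; the mlx5 commit listed below addresses the same kind of truncation in the driver's IRQ setup path.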
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (22 commits)
ovs: limit ovs recursions in ovs_execute_actions to not corrupt stack
team: Replace rcu_read_lock with a mutex in team_vlan_rx_kill_vid
net: hns: bug fix about hisilicon TSO BD mode
brcmfmac: fix BRCMF_FW_NVRAM_DEF macro for older gcc compilers
net: phy: at803x: Add the interrupt register bit definitions
net: phy: at803x: Clean up duplicate register definitions
net: phy: at803x: Allow specifying the RGMII RX clock delay via phy mode
net: phy: at803x: Don't set gbit features for the AR8030 phy
arm64: bpf: add extra pass to handle faulty codegen
arm64: insn: remove BUG_ON from codegen
sctp: the temp asoc's transports should not be hashed/unhashed
net/mlx5_core: Fix trimming down IRQ number
tcp_memcontrol: Forward declare cgroup_subsys and mem_cgroup stucts
batman-adv: Drop immediate orig_node free function
batman-adv: Drop immediate batadv_hard_iface free function
batman-adv: Drop immediate neigh_ifinfo free function
batman-adv: Drop immediate batadv_hardif_neigh_node free function
batman-adv: Drop immediate batadv_neigh_node free function
batman-adv: Drop immediate batadv_orig_ifinfo free function
batman-adv: Avoid recursive call_rcu for batadv_nc_node
...
Diffstat (limited to 'net')
 net/batman-adv/bridge_loop_avoidance.c |  10
 net/batman-adv/hard-interface.h        |  12
 net/batman-adv/network-coding.c        |  19
 net/batman-adv/originator.c            | 196
 net/batman-adv/originator.h            |   1
 net/batman-adv/translation-table.c     |  28
 net/openvswitch/actions.c              |  19
 net/sctp/endpointola.c                 |   2
 net/sctp/input.c                       |   8
 9 files changed, 116 insertions(+), 179 deletions(-)
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index d5d71ac96c8a..c24c481b666f 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -127,21 +127,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 }

 /* finally deinitialize the claim */
-static void batadv_claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
-        struct batadv_bla_claim *claim;
-
-        claim = container_of(rcu, struct batadv_bla_claim, rcu);
-
         batadv_backbone_gw_free_ref(claim->backbone_gw);
-        kfree(claim);
+        kfree_rcu(claim, rcu);
 }

 /* free a claim, call claim_free_rcu if its the last reference */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
         if (atomic_dec_and_test(&claim->refcount))
-                call_rcu(&claim->rcu, batadv_claim_free_rcu);
+                batadv_claim_release(claim);
 }

 /**
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 5a31420513e1..7b12ea8ea29d 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -75,18 +75,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
                 call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
 }

-/**
- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
- *  possibly free it (without rcu callback)
- * @hard_iface: the hard interface to free
- */
-static inline void
-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
-{
-        if (atomic_dec_and_test(&hard_iface->refcount))
-                batadv_hardif_free_rcu(&hard_iface->rcu);
-}
-
 static inline struct batadv_hard_iface *
 batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
 {
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index c98b0ab85449..cc63b44f0d2e 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -203,28 +203,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
 }

 /**
- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
- *  its refcount on the orig_node
- * @rcu: rcu pointer of the nc node
+ * batadv_nc_node_release - release nc_node from lists and queue for free after
+ *  rcu grace period
+ * @nc_node: the nc node to free
  */
-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
+static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
 {
-        struct batadv_nc_node *nc_node;
-
-        nc_node = container_of(rcu, struct batadv_nc_node, rcu);
         batadv_orig_node_free_ref(nc_node->orig_node);
-        kfree(nc_node);
+        kfree_rcu(nc_node, rcu);
 }

 /**
- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
- *  frees it
+ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
+ *  release it
  * @nc_node: the nc node to free
  */
 static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
 {
         if (atomic_dec_and_test(&nc_node->refcount))
-                call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
+                batadv_nc_node_release(nc_node);
 }

 /**
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ae6d18cafc5a..fe578f75c391 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -163,148 +163,101 @@ err:
 }

 /**
- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
- * @rcu: rcu pointer of the neigh_ifinfo object
- */
-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_neigh_ifinfo *neigh_ifinfo;
-
-        neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
-
-        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-                batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
-
-        kfree(neigh_ifinfo);
-}
-
-/**
- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
- *  the neigh_ifinfo (without rcu callback)
+ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
+ *  free after rcu grace period
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 static void
-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
+batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
-        if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-                batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
+        if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+                batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
+
+        kfree_rcu(neigh_ifinfo, rcu);
 }

 /**
- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
+ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
  *  the neigh_ifinfo
  * @neigh_ifinfo: the neigh_ifinfo object to release
  */
 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
 {
         if (atomic_dec_and_test(&neigh_ifinfo->refcount))
-                call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
+                batadv_neigh_ifinfo_release(neigh_ifinfo);
 }

 /**
- * batadv_hardif_neigh_free_rcu - free the hardif neigh_node
- * @rcu: rcu pointer of the neigh_node
- */
-static void batadv_hardif_neigh_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_hardif_neigh_node *hardif_neigh;
-
-        hardif_neigh = container_of(rcu, struct batadv_hardif_neigh_node, rcu);
-
-        batadv_hardif_free_ref_now(hardif_neigh->if_incoming);
-        kfree(hardif_neigh);
-}
-
-/**
- * batadv_hardif_neigh_free_now - decrement the hardif neighbors refcounter
- *  and possibly free it (without rcu callback)
+ * batadv_hardif_neigh_release - release hardif neigh node from lists and
+ *  queue for free after rcu grace period
  * @hardif_neigh: hardif neigh neighbor to free
  */
 static void
-batadv_hardif_neigh_free_now(struct batadv_hardif_neigh_node *hardif_neigh)
+batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-        if (atomic_dec_and_test(&hardif_neigh->refcount)) {
-                spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-                hlist_del_init_rcu(&hardif_neigh->list);
-                spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
+        hlist_del_init_rcu(&hardif_neigh->list);
+        spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

-                batadv_hardif_neigh_free_rcu(&hardif_neigh->rcu);
-        }
+        batadv_hardif_free_ref(hardif_neigh->if_incoming);
+        kfree_rcu(hardif_neigh, rcu);
 }

 /**
  * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
- *  and possibly free it
+ *  and possibly release it
  * @hardif_neigh: hardif neigh neighbor to free
  */
 void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
 {
-        if (atomic_dec_and_test(&hardif_neigh->refcount)) {
-                spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-                hlist_del_init_rcu(&hardif_neigh->list);
-                spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
-
-                call_rcu(&hardif_neigh->rcu, batadv_hardif_neigh_free_rcu);
-        }
+        if (atomic_dec_and_test(&hardif_neigh->refcount))
+                batadv_hardif_neigh_release(hardif_neigh);
 }

 /**
- * batadv_neigh_node_free_rcu - free the neigh_node
- * @rcu: rcu pointer of the neigh_node
+ * batadv_neigh_node_release - release neigh_node from lists and queue for
+ *  free after rcu grace period
+ * @neigh_node: neigh neighbor to free
  */
-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
+static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
 {
         struct hlist_node *node_tmp;
-        struct batadv_neigh_node *neigh_node;
         struct batadv_hardif_neigh_node *hardif_neigh;
         struct batadv_neigh_ifinfo *neigh_ifinfo;
         struct batadv_algo_ops *bao;

-        neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
         bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

         hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
                                   &neigh_node->ifinfo_list, list) {
-                batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
+                batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
         }

         hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
                                                neigh_node->addr);
         if (hardif_neigh) {
                 /* batadv_hardif_neigh_get() increases refcount too */
-                batadv_hardif_neigh_free_now(hardif_neigh);
-                batadv_hardif_neigh_free_now(hardif_neigh);
+                batadv_hardif_neigh_free_ref(hardif_neigh);
+                batadv_hardif_neigh_free_ref(hardif_neigh);
         }

         if (bao->bat_neigh_free)
                 bao->bat_neigh_free(neigh_node);

-        batadv_hardif_free_ref_now(neigh_node->if_incoming);
+        batadv_hardif_free_ref(neigh_node->if_incoming);

-        kfree(neigh_node);
-}
-
-/**
- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
- *  and possibly free it (without rcu callback)
- * @neigh_node: neigh neighbor to free
- */
-static void
-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
-{
-        if (atomic_dec_and_test(&neigh_node->refcount))
-                batadv_neigh_node_free_rcu(&neigh_node->rcu);
+        kfree_rcu(neigh_node, rcu);
 }

 /**
  * batadv_neigh_node_free_ref - decrement the neighbors refcounter
- *  and possibly free it
+ *  and possibly release it
  * @neigh_node: neigh neighbor to free
  */
 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
 {
         if (atomic_dec_and_test(&neigh_node->refcount))
-                call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
+                batadv_neigh_node_release(neigh_node);
 }

 /**
@@ -733,108 +686,99 @@ int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 }

 /**
- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
- * @rcu: rcu pointer of the orig_ifinfo object
+ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
+ *  free after rcu grace period
+ * @orig_ifinfo: the orig_ifinfo object to release
  */
-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
 {
-        struct batadv_orig_ifinfo *orig_ifinfo;
         struct batadv_neigh_node *router;

-        orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
-
         if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
-                batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
+                batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

         /* this is the last reference to this object */
         router = rcu_dereference_protected(orig_ifinfo->router, true);
         if (router)
-                batadv_neigh_node_free_ref_now(router);
-        kfree(orig_ifinfo);
+                batadv_neigh_node_free_ref(router);
+
+        kfree_rcu(orig_ifinfo, rcu);
 }

 /**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- *  the orig_ifinfo (without rcu callback)
+ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
+ *  the orig_ifinfo
  * @orig_ifinfo: the orig_ifinfo object to release
  */
-static void
-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
+void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
 {
         if (atomic_dec_and_test(&orig_ifinfo->refcount))
-                batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
+                batadv_orig_ifinfo_release(orig_ifinfo);
 }

 /**
- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
- *  the orig_ifinfo
- * @orig_ifinfo: the orig_ifinfo object to release
+ * batadv_orig_node_free_rcu - free the orig_node
+ * @rcu: rcu pointer of the orig_node
  */
-void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
 {
-        if (atomic_dec_and_test(&orig_ifinfo->refcount))
-                call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
+        struct batadv_orig_node *orig_node;
+
+        orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+
+        batadv_mcast_purge_orig(orig_node);
+
+        batadv_frag_purge_orig(orig_node, NULL);
+
+        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+
+        kfree(orig_node->tt_buff);
+        kfree(orig_node);
 }

-static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+/**
+ * batadv_orig_node_release - release orig_node from lists and queue for
+ *  free after rcu grace period
+ * @orig_node: the orig node to free
+ */
+static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
 {
         struct hlist_node *node_tmp;
         struct batadv_neigh_node *neigh_node;
-        struct batadv_orig_node *orig_node;
         struct batadv_orig_ifinfo *orig_ifinfo;

-        orig_node = container_of(rcu, struct batadv_orig_node, rcu);
-
         spin_lock_bh(&orig_node->neigh_list_lock);

         /* for all neighbors towards this originator ... */
         hlist_for_each_entry_safe(neigh_node, node_tmp,
                                   &orig_node->neigh_list, list) {
                 hlist_del_rcu(&neigh_node->list);
-                batadv_neigh_node_free_ref_now(neigh_node);
+                batadv_neigh_node_free_ref(neigh_node);
         }

         hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
                                   &orig_node->ifinfo_list, list) {
                 hlist_del_rcu(&orig_ifinfo->list);
-                batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
+                batadv_orig_ifinfo_free_ref(orig_ifinfo);
         }
         spin_unlock_bh(&orig_node->neigh_list_lock);

-        batadv_mcast_purge_orig(orig_node);
-
         /* Free nc_nodes */
         batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

-        batadv_frag_purge_orig(orig_node, NULL);
-
-        if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
-                orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
-
-        kfree(orig_node->tt_buff);
-        kfree(orig_node);
+        call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
 }

 /**
  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
- *  schedule an rcu callback for freeing it
+ *  release it
  * @orig_node: the orig node to free
  */
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
 {
         if (atomic_dec_and_test(&orig_node->refcount))
-                call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
-}
-
-/**
- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
- *  possibly free it (without rcu callback)
- * @orig_node: the orig node to free
- */
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
-{
-        if (atomic_dec_and_test(&orig_node->refcount))
-                batadv_orig_node_free_rcu(&orig_node->rcu);
+                batadv_orig_node_release(orig_node);
 }

 void batadv_originator_free(struct batadv_priv *bat_priv)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 29557753d552..cf0730414ed2 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -38,7 +38,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
 void batadv_originator_free(struct batadv_priv *bat_priv);
 void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                               const u8 *addr);
 struct batadv_hardif_neigh_node *
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index a22080c53401..cdfc85fa2743 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -240,20 +240,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
         return count;
 }

-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
-{
-        struct batadv_tt_orig_list_entry *orig_entry;
-
-        orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
-
-        /* We are in an rcu callback here, therefore we cannot use
-         * batadv_orig_node_free_ref() and its call_rcu():
-         * An rcu_barrier() wouldn't wait for that to finish
-         */
-        batadv_orig_node_free_ref_now(orig_entry->orig_node);
-        kfree(orig_entry);
-}
-
 /**
  * batadv_tt_local_size_mod - change the size by v of the local table identified
  *  by vid
@@ -349,13 +335,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
         batadv_tt_global_size_mod(orig_node, vid, -1);
 }

+/**
+ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
+ *  queue for free after rcu grace period
+ * @orig_entry: tt orig entry to be free'd
+ */
+static void
+batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
+{
+        batadv_orig_node_free_ref(orig_entry->orig_node);
+        kfree_rcu(orig_entry, rcu);
+}
+
 static void
 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
 {
         if (!atomic_dec_and_test(&orig_entry->refcount))
                 return;

-        call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
+        batadv_tt_orig_list_entry_release(orig_entry);
 }

 /**
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c88d0f2d3e01..2d59df521915 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1160,17 +1160,26 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                         const struct sw_flow_actions *acts,
                         struct sw_flow_key *key)
 {
-        int level = this_cpu_read(exec_actions_level);
-        int err;
+        static const int ovs_recursion_limit = 5;
+        int err, level;
+
+        level = __this_cpu_inc_return(exec_actions_level);
+        if (unlikely(level > ovs_recursion_limit)) {
+                net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
+                                     ovs_dp_name(dp));
+                kfree_skb(skb);
+                err = -ENETDOWN;
+                goto out;
+        }

-        this_cpu_inc(exec_actions_level);
         err = do_execute_actions(dp, skb, key,
                                  acts->actions, acts->actions_len);

-        if (!level)
+        if (level == 1)
                 process_deferred_actions(dp);

-        this_cpu_dec(exec_actions_level);
+out:
+        __this_cpu_dec(exec_actions_level);
         return err;
 }

diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 52838eaa1582..2522a6175291 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -333,7 +333,7 @@ struct sctp_association *sctp_endpoint_lookup_assoc(
         if (!ep->base.bind_addr.port)
                 goto out;
         t = sctp_epaddr_lookup_transport(ep, paddr);
-        if (!t || t->asoc->temp)
+        if (!t)
                 goto out;

         *transport = t;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b9a536b52da2..bf61dfb8e09e 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -874,6 +874,9 @@ void sctp_hash_transport(struct sctp_transport *t)
 {
         struct sctp_hash_cmp_arg arg;

+        if (t->asoc->temp)
+                return;
+
         arg.ep = t->asoc->ep;
         arg.paddr = &t->ipaddr;
         arg.net = sock_net(t->asoc->base.sk);
@@ -886,6 +889,9 @@ reinsert:

 void sctp_unhash_transport(struct sctp_transport *t)
 {
+        if (t->asoc->temp)
+                return;
+
         rhashtable_remove_fast(&sctp_transport_hashtable, &t->node,
                                sctp_hash_params);
 }
@@ -931,7 +937,7 @@ static struct sctp_association *__sctp_lookup_association(
         struct sctp_transport *t;

         t = sctp_addrs_lookup_transport(net, local, peer);
-        if (!t || t->dead || t->asoc->temp)
+        if (!t || t->dead)
                 return NULL;

         sctp_association_hold(t->asoc);
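Most of the batman-adv hunks above follow one refactoring shape: the open-coded call_rcu() callbacks and their *_free_ref_now() shortcuts are replaced by a release function that drops references on other objects immediately and defers only the final kfree() through kfree_rcu(). The sketch below shows that shape in isolation; the struct and function names are invented for illustration and are not taken from the batman-adv sources:

/* Illustrative kernel-style sketch of the release + kfree_rcu() pattern.
 * Generic names only -- not the actual batman-adv code.
 */
#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct child;                           /* some other refcounted object */
void child_put(struct child *c);        /* its normal (RCU-safe) put helper */

struct obj {
        atomic_t refcount;
        struct rcu_head rcu;
        struct child *dep;
};

/* Drop references on other objects right away; only the final kfree()
 * of obj itself is deferred until after the RCU grace period.
 */
static void obj_release(struct obj *obj)
{
        child_put(obj->dep);
        kfree_rcu(obj, rcu);
}

static void obj_put(struct obj *obj)
{
        if (atomic_dec_and_test(&obj->refcount))
                obj_release(obj);
}

Because nothing but the kfree() is deferred, the *_free_ref_now() variants become unnecessary and there are no nested call_rcu() chains that an rcu_barrier() could fail to wait for, which is the situation described by the "We are in an rcu callback here" comment deleted in the translation-table.c hunk.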