Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Kconfig                                    |   30
-rw-r--r--  net/tipc/Makefile                                   |    9
-rw-r--r--  net/tipc/addr.c                                     |  160
-rw-r--r--  net/tipc/addr.h                                     |   91
-rw-r--r--  net/tipc/bcast.c                                    |  378
-rw-r--r--  net/tipc/bcast.h                                    |   25
-rw-r--r--  net/tipc/bearer.c                                   |  635
-rw-r--r--  net/tipc/bearer.h                                   |   63
-rw-r--r--  net/tipc/core.c                                     |  107
-rw-r--r--  net/tipc/core.h                                     |   75
-rw-r--r--  net/tipc/crypto.c                                   | 2484
-rw-r--r--  net/tipc/crypto.h                                   |  200
-rw-r--r--  net/tipc/diag.c                                     |  118
-rw-r--r--  net/tipc/discover.c                                 |  408
-rw-r--r--  net/tipc/discover.h                                 |    8
-rw-r--r--  net/tipc/eth_media.c                                |    9
-rw-r--r--  net/tipc/group.c                                    |  959
-rw-r--r--  net/tipc/group.h                                    |   77
-rw-r--r--  net/tipc/ib_media.c                                 |    7
-rw-r--r--  net/tipc/link.c                                     | 1681
-rw-r--r--  net/tipc/link.h                                     |   33
-rw-r--r--  net/tipc/monitor.c                                  |  133
-rw-r--r--  net/tipc/monitor.h                                  |    1
-rw-r--r--  net/tipc/msg.c                                      |  555
-rw-r--r--  net/tipc/msg.h                                      |  464
-rw-r--r--  net/tipc/name_distr.c                               |  366
-rw-r--r--  net/tipc/name_distr.h                               |   13
-rw-r--r--  net/tipc/name_table.c                               | 1523
-rw-r--r--  net/tipc/name_table.h                               |  140
-rw-r--r--  net/tipc/net.c                                      |  177
-rw-r--r--  net/tipc/net.h                                      |    7
-rw-r--r--  net/tipc/netlink.c                                  |  101
-rw-r--r--  net/tipc/netlink.h                                  |    1
-rw-r--r--  net/tipc/netlink_compat.c                           |  288
-rw-r--r--  net/tipc/node.c                                     | 1360
-rw-r--r--  net/tipc/node.h                                     |   57
-rw-r--r--  net/tipc/server.c                                   |  638
-rw-r--r--  net/tipc/socket.c                                   | 2472
-rw-r--r--  net/tipc/socket.h                                   |   17
-rw-r--r--  net/tipc/subscr.c                                   |  399
-rw-r--r--  net/tipc/subscr.h                                   |   96
-rw-r--r--  net/tipc/sysctl.c                                   |   44
-rw-r--r--  net/tipc/topsrv.c                                   |  733
-rw-r--r--  net/tipc/topsrv.h (renamed from net/tipc/server.h)  |   63
-rw-r--r--  net/tipc/trace.c                                    |  206
-rw-r--r--  net/tipc/trace.h                                    |  434
-rw-r--r--  net/tipc/udp_media.c                                |  246
-rw-r--r--  net/tipc/udp_media.h                                |   14
48 files changed, 13675 insertions, 4430 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index c25a3a149dc4..bb0d71eb02a6 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
#
# TIPC configuration
#
@@ -5,7 +6,8 @@
menuconfig TIPC
tristate "The TIPC Protocol"
depends on INET
- ---help---
+ depends on IPV6 || IPV6=n
+ help
The Transparent Inter Process Communication (TIPC) protocol is
specially designed for intra cluster communication. This protocol
originates from Ericsson where it has been used in carrier grade
@@ -16,7 +18,7 @@ menuconfig TIPC
This protocol support is also available as a module ( = code which
can be inserted in and removed from the running kernel whenever you
want). The module will be called tipc. If you want to compile it
- as a module, say M here and read <file:Documentation/kbuild/modules.txt>.
+ as a module, say M here and read <file:Documentation/kbuild/modules.rst>.
If in doubt, say N.
@@ -30,7 +32,29 @@ config TIPC_MEDIA_UDP
bool "IP/UDP media type support"
depends on TIPC
select NET_UDP_TUNNEL
+ default y
help
Saying Y here will enable support for running TIPC over IP/UDP
- bool
+
+config TIPC_CRYPTO
+ bool "TIPC encryption support"
+ depends on TIPC
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_GCM
default y
+ help
+ Saying Y here will enable support for TIPC encryption.
+ All TIPC messages will be encrypted/decrypted by using the currently most
+ advanced algorithm: AEAD AES-GCM (like IPSec or TLS) before leaving/
+ entering the TIPC stack.
+ Key setting from user-space is performed via netlink by a user program
+ (e.g. the iproute2 'tipc' tool).
+
+config TIPC_DIAG
+ tristate "TIPC: socket monitoring interface"
+ depends on TIPC
+ default y
+ help
+ Support for TIPC socket monitoring interface used by ss tool.
+ If unsure, say Y.
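
For reference, a plausible .config fragment with the options added above enabled (illustrative only; whether each is built in or modular depends on your configuration):

	CONFIG_TIPC=m
	CONFIG_TIPC_MEDIA_UDP=y
	CONFIG_TIPC_CRYPTO=y
	CONFIG_TIPC_DIAG=m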
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 31b9f9c52974..18e1636aa036 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux TIPC layer
#
@@ -8,8 +9,14 @@ tipc-y += addr.o bcast.o bearer.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o monitor.o name_table.o net.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
- server.o socket.o
+ topsrv.o group.o trace.o
+
+CFLAGS_trace.o += -I$(src)
tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
tipc-$(CONFIG_SYSCTL) += sysctl.o
+tipc-$(CONFIG_TIPC_CRYPTO) += crypto.o
+
+obj-$(CONFIG_TIPC_DIAG) += tipc_diag.o
+tipc_diag-y += diag.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 48fd3b5a73fb..6f5c54cbf8d9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -1,8 +1,9 @@
/*
* net/tipc/addr.c: TIPC address utility routines
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2018, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -34,113 +35,90 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/kernel.h>
#include "addr.h"
#include "core.h"
-/**
- * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
- */
-int in_own_cluster(struct net *net, u32 addr)
-{
- return in_own_cluster_exact(net, addr) || !addr;
-}
-
-int in_own_cluster_exact(struct net *net, u32 addr)
+bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- return !((addr ^ tn->own_addr) >> 12);
+ if (!domain || (domain == addr))
+ return true;
+ if (!legacy_format)
+ return false;
+ if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
+ return true;
+ if (domain == (addr & TIPC_ZONE_MASK)) /* domain <Z.0.0> */
+ return true;
+ return false;
}
-/**
- * in_own_node - test for node inclusion; <0.0.0> always matches
- */
-int in_own_node(struct net *net, u32 addr)
+void tipc_set_node_id(struct net *net, u8 *id)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
- return (addr == tn->own_addr) || !addr;
+ memcpy(tn->node_id, id, NODE_ID_LEN);
+ tipc_nodeid2string(tn->node_id_string, id);
+ tn->trial_addr = hash128to32(id);
+ pr_info("Node identity %s, cluster identity %u\n",
+ tipc_own_id_string(net), tn->net_id);
}
-/**
- * addr_domain - convert 2-bit scope value to equivalent message lookup domain
- *
- * Needed when address of a named message must be looked up a second time
- * after a network hop.
- */
-u32 addr_domain(struct net *net, u32 sc)
+void tipc_set_node_addr(struct net *net, u32 addr)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
+ u8 node_id[NODE_ID_LEN] = {0,};
- if (likely(sc == TIPC_NODE_SCOPE))
- return tn->own_addr;
- if (sc == TIPC_CLUSTER_SCOPE)
- return tipc_cluster_mask(tn->own_addr);
- return tipc_zone_mask(tn->own_addr);
+ tn->node_addr = addr;
+ if (!tipc_own_id(net)) {
+ sprintf(node_id, "%x", addr);
+ tipc_set_node_id(net, node_id);
+ }
+ tn->trial_addr = addr;
+ tn->addr_trial_end = jiffies;
+ pr_info("Node number set to %u\n", addr);
}
-/**
- * tipc_addr_domain_valid - validates a network domain address
- *
- * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
- * where Z, C, and N are non-zero.
- *
- * Returns 1 if domain address is valid, otherwise 0
- */
-int tipc_addr_domain_valid(u32 addr)
+int tipc_nodeid2string(char *str, u8 *id)
{
- u32 n = tipc_node(addr);
- u32 c = tipc_cluster(addr);
- u32 z = tipc_zone(addr);
-
- if (n && (!z || !c))
- return 0;
- if (c && !z)
- return 0;
- return 1;
-}
+ int i;
+ u8 c;
-/**
- * tipc_addr_node_valid - validates a proposed network address for this node
- *
- * Accepts <Z.C.N>, where Z, C, and N are non-zero.
- *
- * Returns 1 if address can be used, otherwise 0
- */
-int tipc_addr_node_valid(u32 addr)
-{
- return tipc_addr_domain_valid(addr) && tipc_node(addr);
-}
+ /* Already a string ? */
+ for (i = 0; i < NODE_ID_LEN; i++) {
+ c = id[i];
+ if (c >= '0' && c <= '9')
+ continue;
+ if (c >= 'A' && c <= 'Z')
+ continue;
+ if (c >= 'a' && c <= 'z')
+ continue;
+ if (c == '.')
+ continue;
+ if (c == ':')
+ continue;
+ if (c == '_')
+ continue;
+ if (c == '-')
+ continue;
+ if (c == '@')
+ continue;
+ if (c != 0)
+ break;
+ }
+ if (i == NODE_ID_LEN) {
+ memcpy(str, id, NODE_ID_LEN);
+ str[NODE_ID_LEN] = 0;
+ return i;
+ }
-int tipc_in_scope(u32 domain, u32 addr)
-{
- if (!domain || (domain == addr))
- return 1;
- if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */
- return 1;
- if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */
- return 1;
- return 0;
-}
+ /* Translate to hex string */
+ for (i = 0; i < NODE_ID_LEN; i++)
+ sprintf(&str[2 * i], "%02x", id[i]);
-/**
- * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
- */
-int tipc_addr_scope(u32 domain)
-{
- if (likely(!domain))
- return TIPC_ZONE_SCOPE;
- if (tipc_node(domain))
- return TIPC_NODE_SCOPE;
- if (tipc_cluster(domain))
- return TIPC_CLUSTER_SCOPE;
- return TIPC_ZONE_SCOPE;
-}
+ /* Strip off trailing zeroes */
+ for (i = NODE_ID_STR_LEN - 2; str[i] == '0'; i--)
+ str[i] = 0;
-char *tipc_addr_string_fill(char *string, u32 addr)
-{
- snprintf(string, 16, "<%u.%u.%u>",
- tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
- return string;
+ return i + 1;
}
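
A hypothetical caller sketch, not part of the patch, showing the two paths of tipc_nodeid2string() above (a printable identity is copied verbatim; anything else is hex-encoded with trailing zeroes stripped), using the NODE_ID_LEN/NODE_ID_STR_LEN constants referenced by the function:

	u8 id1[NODE_ID_LEN] = "left";				/* printable: copied verbatim */
	u8 id2[NODE_ID_LEN] = { 0xde, 0xad, 0xbe, 0xef };	/* binary: hex-encoded */
	char str[NODE_ID_STR_LEN];

	tipc_nodeid2string(str, id1);	/* str == "left" */
	tipc_nodeid2string(str, id2);	/* str == "deadbeef", trailing zeroes stripped */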
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index bebb347803ce..a113cf7e1f89 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -1,8 +1,9 @@
/*
* net/tipc/addr.h: Include file for TIPC address utility routines
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2018, Ericsson AB
* Copyright (c) 2004-2005, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,16 +44,67 @@
#include <net/netns/generic.h>
#include "core.h"
+/* Struct tipc_uaddr: internal version of struct sockaddr_tipc.
+ * Must be kept aligned both regarding field positions and size.
+ */
+struct tipc_uaddr {
+ unsigned short family;
+ unsigned char addrtype;
+ signed char scope;
+ union {
+ struct {
+ struct tipc_service_addr sa;
+ u32 lookup_node;
+ };
+ struct tipc_service_range sr;
+ struct tipc_socket_addr sk;
+ };
+};
+
+static inline void tipc_uaddr(struct tipc_uaddr *ua, u32 atype, u32 scope,
+ u32 type, u32 lower, u32 upper)
+{
+ ua->family = AF_TIPC;
+ ua->addrtype = atype;
+ ua->scope = scope;
+ ua->sr.type = type;
+ ua->sr.lower = lower;
+ ua->sr.upper = upper;
+}
+
+static inline bool tipc_uaddr_valid(struct tipc_uaddr *ua, int len)
+{
+ u32 atype;
+
+ if (len < sizeof(struct sockaddr_tipc))
+ return false;
+ atype = ua->addrtype;
+ if (ua->family != AF_TIPC)
+ return false;
+ if (atype == TIPC_SERVICE_ADDR || atype == TIPC_SOCKET_ADDR)
+ return true;
+ if (atype == TIPC_SERVICE_RANGE)
+ return ua->sr.upper >= ua->sr.lower;
+ return false;
+}
+
static inline u32 tipc_own_addr(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ return tipc_net(net)->node_addr;
+}
- return tn->own_addr;
+static inline u8 *tipc_own_id(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!strlen(tn->node_id_string))
+ return NULL;
+ return tn->node_id;
}
-static inline u32 tipc_zone_mask(u32 addr)
+static inline char *tipc_own_id_string(struct net *net)
{
- return addr & TIPC_ZONE_MASK;
+ return tipc_net(net)->node_id_string;
}
static inline u32 tipc_cluster_mask(u32 addr)
@@ -60,15 +112,24 @@ static inline u32 tipc_cluster_mask(u32 addr)
return addr & TIPC_ZONE_CLUSTER_MASK;
}
-u32 tipc_own_addr(struct net *net);
-int in_own_cluster(struct net *net, u32 addr);
-int in_own_cluster_exact(struct net *net, u32 addr);
-int in_own_node(struct net *net, u32 addr);
-u32 addr_domain(struct net *net, u32 sc);
-int tipc_addr_domain_valid(u32);
-int tipc_addr_node_valid(u32 addr);
-int tipc_in_scope(u32 domain, u32 addr);
-int tipc_addr_scope(u32 domain);
-char *tipc_addr_string_fill(char *string, u32 addr);
+static inline int tipc_node2scope(u32 node)
+{
+ return node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
+}
+
+static inline int tipc_scope2node(struct net *net, int sc)
+{
+ return sc != TIPC_NODE_SCOPE ? 0 : tipc_own_addr(net);
+}
+
+static inline int in_own_node(struct net *net, u32 addr)
+{
+ return addr == tipc_own_addr(net) || !addr;
+}
+
+bool tipc_in_scope(bool legacy_format, u32 domain, u32 addr);
+void tipc_set_node_id(struct net *net, u8 *id);
+void tipc_set_node_addr(struct net *net, u32 addr);
+int tipc_nodeid2string(char *str, u8 *id);
#endif
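
As a usage illustration (hypothetical, not taken from this patch), the helpers added above compose naturally when checking a caller-supplied address; service type 1000 and the instance range are made-up values:

	struct tipc_uaddr ua;

	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE, 1000, 10, 20);
	if (!tipc_uaddr_valid(&ua, sizeof(struct sockaddr_tipc)))
		pr_warn("invalid TIPC service range\n");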
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7d99029df342..114fef65f92e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
- * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2017, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
@@ -42,21 +42,24 @@
#include "link.h"
#include "name_table.h"
-#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
-#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
+#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
+#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
const char tipc_bclink_name[] = "broadcast-link";
+unsigned long sysctl_tipc_bc_retruni __read_mostly;
/**
* struct tipc_bc_base - base structure for keeping broadcast send state
* @link: broadcast send link structure
* @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
* @primary_bearer: a bearer having links to all broadcast destinations, if any
* @bcast_support: indicates if primary bearer, if any, supports broadcast
+ * @force_bcast: forces broadcast for multicast traffic
* @rcast_support: indicates if all peer nodes support replicast
+ * @force_rcast: forces replicast for multicast traffic
* @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
*/
struct tipc_bc_base {
struct tipc_link *link;
@@ -64,7 +67,9 @@ struct tipc_bc_base {
int dests[MAX_BEARERS];
int primary_bearer;
bool bcast_support;
+ bool force_bcast;
bool rcast_support;
+ bool force_rcast;
int rc_ratio;
int bc_threshold;
};
@@ -74,14 +79,18 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
return tipc_net(net)->bcbase;
}
+/* tipc_bcast_get_mtu(): get the MTU currently used by broadcast link
+ * Note: the MTU is decremented to give room for a tunnel header, in
+ * case the message needs to be sent as replicast
+ */
int tipc_bcast_get_mtu(struct net *net)
{
- return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
+ return tipc_link_mss(tipc_bc_sndlink(net));
}
-void tipc_bcast_disable_rcast(struct net *net)
+void tipc_bcast_toggle_rcast(struct net *net, bool supp)
{
- tipc_bc_base(net)->rcast_support = false;
+ tipc_bc_base(net)->rcast_support = supp;
}
static void tipc_bcbase_calc_bc_threshold(struct net *net)
@@ -99,6 +108,8 @@ static void tipc_bcbase_select_primary(struct net *net)
{
struct tipc_bc_base *bb = tipc_bc_base(net);
int all_dests = tipc_link_bc_peers(bb->link);
+ int max_win = tipc_link_max_win(bb->link);
+ int min_win = tipc_link_min_win(bb->link);
int i, mtu, prim;
bb->primary_bearer = INVALID_BEARER_ID;
@@ -112,8 +123,12 @@ static void tipc_bcbase_select_primary(struct net *net)
continue;
mtu = tipc_bearer_mtu(net, i);
- if (mtu < tipc_link_mtu(bb->link))
+ if (mtu < tipc_link_mtu(bb->link)) {
tipc_link_set_mtu(bb->link, mtu);
+ tipc_link_set_queue_limits(bb->link,
+ min_win,
+ max_win);
+ }
bb->bcast_support &= tipc_bearer_bcast_support(net, i);
if (bb->dests[i] < all_dests)
continue;
@@ -177,7 +192,7 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
}
/* We have to transmit across all bearers */
- skb_queue_head_init(&_xmitq);
+ __skb_queue_head_init(&_xmitq);
for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
if (!bb->dests[bearer_id])
continue;
@@ -212,9 +227,24 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests,
}
/* Can current method be changed ? */
method->expires = jiffies + TIPC_METHOD_EXPIRE;
- if (method->mandatory || time_before(jiffies, exp))
+ if (method->mandatory)
+ return;
+
+ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) &&
+ time_before(jiffies, exp))
return;
+ /* Configuration as force 'broadcast' method */
+ if (bb->force_bcast) {
+ method->rcast = false;
+ return;
+ }
+ /* Configuration as force 'replicast' method */
+ if (bb->force_rcast) {
+ method->rcast = true;
+ return;
+ }
+ /* Configuration as 'autoselect' or default method */
/* Determine method to use now */
method->rcast = dests <= bb->bc_threshold;
}
@@ -226,8 +256,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests,
* Consumes the buffer chain.
* Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
*/
-static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
- u16 *cong_link_cnt)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ u16 *cong_link_cnt)
{
struct tipc_link *l = tipc_bc_sndlink(net);
struct sk_buff_head xmitq;
@@ -258,25 +288,83 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct tipc_nlist *dests, u16 *cong_link_cnt)
{
+ struct tipc_dest *dst, *tmp;
struct sk_buff_head _pkts;
- struct u32_item *n, *tmp;
- u32 dst, selector;
+ u32 dnode, selector;
selector = msg_link_selector(buf_msg(skb_peek(pkts)));
__skb_queue_head_init(&_pkts);
- list_for_each_entry_safe(n, tmp, &dests->list, list) {
- dst = n->value;
- if (!tipc_msg_pskb_copy(dst, pkts, &_pkts))
+ list_for_each_entry_safe(dst, tmp, &dests->list, list) {
+ dnode = dst->node;
+ if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))
return -ENOMEM;
/* Any other return value than -ELINKCONG is ignored */
- if (tipc_node_xmit(net, &_pkts, dst, selector) == -ELINKCONG)
+ if (tipc_node_xmit(net, &_pkts, dnode, selector) == -ELINKCONG)
(*cong_link_cnt)++;
}
return 0;
}
+/* tipc_mcast_send_sync - deliver a dummy message with SYN bit
+ * @net: the applicable net namespace
+ * @skb: socket buffer to copy
+ * @method: send method to be used
+ * @dests: destination nodes for message.
+ * Returns 0 if success, otherwise errno
+ */
+static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb,
+ struct tipc_mc_method *method,
+ struct tipc_nlist *dests)
+{
+ struct tipc_msg *hdr, *_hdr;
+ struct sk_buff_head tmpq;
+ u16 cong_link_cnt = 0;
+ struct sk_buff *_skb;
+ int rc = 0;
+
+ /* Does the cluster support the new capability ? */
+ if (!(tipc_net(net)->capabilities & TIPC_MCAST_RBCTL))
+ return 0;
+
+ hdr = buf_msg(skb);
+ if (msg_user(hdr) == MSG_FRAGMENTER)
+ hdr = msg_inner_hdr(hdr);
+ if (msg_type(hdr) != TIPC_MCAST_MSG)
+ return 0;
+
+ /* Allocate dummy message */
+ _skb = tipc_buf_acquire(MCAST_H_SIZE, GFP_KERNEL);
+ if (!_skb)
+ return -ENOMEM;
+
+ /* Preparing for 'synching' header */
+ msg_set_syn(hdr, 1);
+
+ /* Copy skb's header into a dummy header */
+ skb_copy_to_linear_data(_skb, hdr, MCAST_H_SIZE);
+ skb_orphan(_skb);
+
+ /* Reverse method for dummy message */
+ _hdr = buf_msg(_skb);
+ msg_set_size(_hdr, MCAST_H_SIZE);
+ msg_set_is_rcast(_hdr, !msg_is_rcast(hdr));
+ msg_set_errcode(_hdr, TIPC_ERR_NO_PORT);
+
+ __skb_queue_head_init(&tmpq);
+ __skb_queue_tail(&tmpq, _skb);
+ if (method->rcast)
+ rc = tipc_bcast_xmit(net, &tmpq, &cong_link_cnt);
+ else
+ rc = tipc_rcast_xmit(net, &tmpq, dests, &cong_link_cnt);
+
+ /* This queue should normally be empty by now */
+ __skb_queue_purge(&tmpq);
+
+ return rc;
+}
+
/* tipc_mcast_xmit - deliver message to indicated destination nodes
* and to identified node local sockets
* @net: the applicable net namespace
@@ -292,10 +380,13 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
u16 *cong_link_cnt)
{
struct sk_buff_head inputq, localq;
+ bool rcast = method->rcast;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
int rc = 0;
skb_queue_head_init(&inputq);
- skb_queue_head_init(&localq);
+ __skb_queue_head_init(&localq);
/* Clone packets before they are consumed by next call */
if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
@@ -305,14 +396,33 @@ int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
/* Send according to determined transmit method */
if (dests->remote) {
tipc_bcast_select_xmit_method(net, dests->remote, method);
+
+ skb = skb_peek(pkts);
+ hdr = buf_msg(skb);
+ if (msg_user(hdr) == MSG_FRAGMENTER)
+ hdr = msg_inner_hdr(hdr);
+ msg_set_is_rcast(hdr, method->rcast);
+
+ /* Switch method ? */
+ if (rcast != method->rcast) {
+ rc = tipc_mcast_send_sync(net, skb, method, dests);
+ if (unlikely(rc)) {
+ pr_err("Unable to send SYN: method %d, rc %d\n",
+ rcast, rc);
+ goto exit;
+ }
+ }
+
if (method->rcast)
rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
else
rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
}
- if (dests->local)
+ if (dests->local) {
+ tipc_loopback_trace(net, &localq);
tipc_sk_mcast_rcv(net, &localq, &inputq);
+ }
exit:
/* This queue should normally be empty by now */
__skb_queue_purge(pkts);
@@ -371,7 +481,7 @@ void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
__skb_queue_head_init(&xmitq);
tipc_bcast_lock(net);
- tipc_link_bc_ack_rcv(l, acked, &xmitq);
+ tipc_link_bc_ack_rcv(l, acked, 0, NULL, &xmitq, NULL);
tipc_bcast_unlock(net);
tipc_bcbase_xmit(net, &xmitq);
@@ -386,9 +496,11 @@ void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
* RCU is locked, no other locks set
*/
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
- struct tipc_msg *hdr)
+ struct tipc_msg *hdr,
+ struct sk_buff_head *retrq)
{
struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+ struct tipc_gap_ack_blks *ga;
struct sk_buff_head xmitq;
int rc = 0;
@@ -398,8 +510,13 @@ int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
if (msg_type(hdr) != STATE_MSG) {
tipc_link_bc_init_rcv(l, hdr);
} else if (!msg_bc_ack_invalid(hdr)) {
- tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
- rc = tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+ tipc_get_gap_ack_blks(&ga, l, hdr, false);
+ if (!sysctl_tipc_bc_retruni)
+ retrq = &xmitq;
+ rc = tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr),
+ msg_bc_gap(hdr), ga, &xmitq,
+ retrq);
+ rc |= tipc_link_bc_sync_rcv(l, hdr, &xmitq);
}
tipc_bcast_unlock(net);
@@ -452,10 +569,8 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
tipc_sk_rcv(net, inputq);
}
-int tipc_bclink_reset_stats(struct net *net)
+int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l)
{
- struct tipc_link *l = tipc_bc_sndlink(net);
-
if (!l)
return -ENOPROTOOPT;
@@ -465,26 +580,79 @@ int tipc_bclink_reset_stats(struct net *net)
return 0;
}
-static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)
+static int tipc_bc_link_set_queue_limits(struct net *net, u32 max_win)
{
struct tipc_link *l = tipc_bc_sndlink(net);
if (!l)
return -ENOPROTOOPT;
- if (limit < BCLINK_WIN_MIN)
- limit = BCLINK_WIN_MIN;
- if (limit > TIPC_MAX_LINK_WIN)
+ if (max_win < BCLINK_WIN_MIN)
+ max_win = BCLINK_WIN_MIN;
+ if (max_win > TIPC_MAX_LINK_WIN)
return -EINVAL;
tipc_bcast_lock(net);
- tipc_link_set_queue_limits(l, limit);
+ tipc_link_set_queue_limits(l, tipc_link_min_win(l), max_win);
tipc_bcast_unlock(net);
return 0;
}
+static int tipc_bc_link_set_broadcast_mode(struct net *net, u32 bc_mode)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ switch (bc_mode) {
+ case BCLINK_MODE_BCAST:
+ if (!bb->bcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = true;
+ bb->force_rcast = false;
+ break;
+ case BCLINK_MODE_RCAST:
+ if (!bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = false;
+ bb->force_rcast = true;
+ break;
+ case BCLINK_MODE_SEL:
+ if (!bb->bcast_support || !bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ bb->force_bcast = false;
+ bb->force_rcast = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tipc_bc_link_set_broadcast_ratio(struct net *net, u32 bc_ratio)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ if (!bb->bcast_support || !bb->rcast_support)
+ return -ENOPROTOOPT;
+
+ if (bc_ratio > 100 || bc_ratio <= 0)
+ return -EINVAL;
+
+ bb->rc_ratio = bc_ratio;
+ tipc_bcast_lock(net);
+ tipc_bcbase_calc_bc_threshold(net);
+ tipc_bcast_unlock(net);
+
+ return 0;
+}
+
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
int err;
u32 win;
+ u32 bc_mode;
+ u32 bc_ratio;
struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
if (!attrs[TIPC_NLA_LINK_PROP])
@@ -494,12 +662,28 @@ int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
if (err)
return err;
- if (!props[TIPC_NLA_PROP_WIN])
+ if (!props[TIPC_NLA_PROP_WIN] &&
+ !props[TIPC_NLA_PROP_BROADCAST] &&
+ !props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
return -EOPNOTSUPP;
+ }
- win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_BROADCAST]) {
+ bc_mode = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST]);
+ err = tipc_bc_link_set_broadcast_mode(net, bc_mode);
+ }
- return tipc_bc_link_set_queue_limits(net, win);
+ if (!err && props[TIPC_NLA_PROP_BROADCAST_RATIO]) {
+ bc_ratio = nla_get_u32(props[TIPC_NLA_PROP_BROADCAST_RATIO]);
+ err = tipc_bc_link_set_broadcast_ratio(net, bc_ratio);
+ }
+
+ if (!err && props[TIPC_NLA_PROP_WIN]) {
+ win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ err = tipc_bc_link_set_queue_limits(net, win);
+ }
+
+ return err;
}
int tipc_bcast_init(struct net *net)
@@ -508,14 +692,15 @@ int tipc_bcast_init(struct net *net)
struct tipc_bc_base *bb = NULL;
struct tipc_link *l = NULL;
- bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+ bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb)
goto enomem;
tn->bcbase = bb;
spin_lock_init(&tipc_net(net)->bclock);
- if (!tipc_link_bc_create(net, 0, 0,
- U16_MAX,
+ if (!tipc_link_bc_create(net, 0, 0, NULL,
+ one_page_mtu,
+ BCLINK_WIN_DEFAULT,
BCLINK_WIN_DEFAULT,
0,
&bb->inputq,
@@ -525,7 +710,7 @@ int tipc_bcast_init(struct net *net)
goto enomem;
bb->link = l;
tn->bcl = l;
- bb->rc_ratio = 25;
+ bb->rc_ratio = 10;
bb->rcast_support = true;
return 0;
enomem:
@@ -554,7 +739,7 @@ void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
{
if (node == nl->self)
nl->local = true;
- else if (u32_push(&nl->list, node))
+ else if (tipc_dest_push(&nl->list, node, 0))
nl->remote++;
}
@@ -562,13 +747,118 @@ void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
{
if (node == nl->self)
nl->local = false;
- else if (u32_del(&nl->list, node))
+ else if (tipc_dest_del(&nl->list, node, 0))
nl->remote--;
}
void tipc_nlist_purge(struct tipc_nlist *nl)
{
- u32_list_purge(&nl->list);
+ tipc_dest_list_purge(&nl->list);
nl->remote = 0;
- nl->local = 0;
+ nl->local = false;
+}
+
+u32 tipc_bcast_get_mode(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ if (bb->force_bcast)
+ return BCLINK_MODE_BCAST;
+
+ if (bb->force_rcast)
+ return BCLINK_MODE_RCAST;
+
+ if (bb->bcast_support && bb->rcast_support)
+ return BCLINK_MODE_SEL;
+
+ return 0;
+}
+
+u32 tipc_bcast_get_broadcast_ratio(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+
+ return bb->rc_ratio;
+}
+
+void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
+ struct sk_buff_head *inputq)
+{
+ struct sk_buff *skb, *_skb, *tmp;
+ struct tipc_msg *hdr, *_hdr;
+ bool match = false;
+ u32 node, port;
+
+ skb = skb_peek(inputq);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+
+ if (likely(!msg_is_syn(hdr) && skb_queue_empty(defq)))
+ return;
+
+ node = msg_orignode(hdr);
+ if (node == tipc_own_addr(net))
+ return;
+
+ port = msg_origport(hdr);
+
+ /* Has the twin SYN message already arrived ? */
+ skb_queue_walk(defq, _skb) {
+ _hdr = buf_msg(_skb);
+ if (msg_orignode(_hdr) != node)
+ continue;
+ if (msg_origport(_hdr) != port)
+ continue;
+ match = true;
+ break;
+ }
+
+ if (!match) {
+ if (!msg_is_syn(hdr))
+ return;
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Deliver non-SYN message from other link, otherwise queue it */
+ if (!msg_is_syn(hdr)) {
+ if (msg_is_rcast(hdr) != msg_is_rcast(_hdr))
+ return;
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Queue non-SYN/SYN message from same link */
+ if (msg_is_rcast(hdr) == msg_is_rcast(_hdr)) {
+ __skb_dequeue(inputq);
+ __skb_queue_tail(defq, skb);
+ return;
+ }
+
+ /* Matching SYN messages => return the one with data, if any */
+ __skb_unlink(_skb, defq);
+ if (msg_data_sz(hdr)) {
+ kfree_skb(_skb);
+ } else {
+ __skb_dequeue(inputq);
+ kfree_skb(skb);
+ __skb_queue_tail(inputq, _skb);
+ }
+
+ /* Deliver subsequent non-SYN messages from same peer */
+ skb_queue_walk_safe(defq, _skb, tmp) {
+ _hdr = buf_msg(_skb);
+ if (msg_orignode(_hdr) != node)
+ continue;
+ if (msg_origport(_hdr) != port)
+ continue;
+ if (msg_is_syn(_hdr))
+ break;
+ __skb_unlink(_skb, defq);
+ __skb_queue_tail(inputq, _skb);
+ }
}
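
The autoselect branch in tipc_bcast_select_xmit_method() above switches to replicast when the destination count is at or below bc_threshold, which tipc_bcbase_calc_bc_threshold() recomputes from rc_ratio (its body is not part of this diff). A rough sketch of the relationship, assuming a simple percentage-of-cluster-size formula:

	/* Sketch only; the exact formula lives in
	 * tipc_bcbase_calc_bc_threshold(), which this diff does not show.
	 */
	static bool tipc_use_replicast(int dests, int cluster_size, int rc_ratio)
	{
		int bc_threshold = 1 + (cluster_size * rc_ratio / 100);

		return dests <= bc_threshold;	/* few destinations: replicast */
	}

With rc_ratio now defaulting to 10 instead of 25, true broadcast takes over once more than roughly a tenth of the cluster is addressed.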
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 751530ab0c49..2d9352dc7b0e 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -45,9 +45,14 @@ struct tipc_nl_msg;
struct tipc_nlist;
struct tipc_nitem;
extern const char tipc_bclink_name[];
+extern unsigned long sysctl_tipc_bc_retruni;
#define TIPC_METHOD_EXPIRE msecs_to_jiffies(5000)
+#define BCLINK_MODE_BCAST 0x1
+#define BCLINK_MODE_RCAST 0x2
+#define BCLINK_MODE_SEL 0x4
+
struct tipc_nlist {
struct list_head list;
u32 self;
@@ -63,11 +68,13 @@ void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
/* Cookie to be used between socket and broadcast layer
* @rcast: replicast (instead of broadcast) was used at previous xmit
* @mandatory: broadcast/replicast indication was set by user
+ * @deferredq: defer queue used to keep messages in order
* @expires: re-evaluate non-mandatory transmit method if we are past this
*/
struct tipc_mc_method {
bool rcast;
bool mandatory;
+ struct sk_buff_head deferredq;
unsigned long expires;
};
@@ -79,18 +86,28 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
-void tipc_bcast_disable_rcast(struct net *net);
+void tipc_bcast_toggle_rcast(struct net *net, bool supp);
int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
struct tipc_mc_method *method, struct tipc_nlist *dests,
u16 *cong_link_cnt);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ u16 *cong_link_cnt);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
int tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
- struct tipc_msg *hdr);
-int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
+ struct tipc_msg *hdr,
+ struct sk_buff_head *retrq);
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *bcl);
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
-int tipc_bclink_reset_stats(struct net *net);
+int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l);
+
+u32 tipc_bcast_get_mode(struct net *net);
+u32 tipc_bcast_get_broadcast_ratio(struct net *net);
+
+void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
+ struct sk_buff_head *inputq);
static inline void tipc_bcast_lock(struct net *net)
{
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index d174ee3254ee..ae1ddbf71853 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -43,6 +43,8 @@
#include "bcast.h"
#include "netlink.h"
#include "udp_media.h"
+#include "trace.h"
+#include "crypto.h"
#define MAX_ADDR_STR 60
@@ -61,13 +63,16 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
- return rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ return rcu_dereference(tn->bearer_list[bearer_id]);
}
static void bearer_disable(struct net *net, struct tipc_bearer *b);
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
/**
* tipc_media_find - locates specified media object by name
+ * @name: name to locate
*/
struct tipc_media *tipc_media_find(const char *name)
{
@@ -82,6 +87,7 @@ struct tipc_media *tipc_media_find(const char *name)
/**
* media_find_id - locates specified media object by type identifier
+ * @type: type identifier to locate
*/
static struct tipc_media *media_find_id(u8 type)
{
@@ -96,8 +102,11 @@ static struct tipc_media *media_find_id(u8 type)
/**
* tipc_media_addr_printf - record media address in print buffer
+ * @buf: output buffer
+ * @len: output buffer size remaining
+ * @a: input media address
*/
-void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
char addr_str[MAX_ADDR_STR];
struct tipc_media *m;
@@ -112,9 +121,10 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
for (i = 0; i < sizeof(a->value); i++)
- ret += scnprintf(buf - ret, len + ret,
- "-%02x", a->value[i]);
+ ret += scnprintf(buf + ret, len - ret,
+ "-%x", a->value[i]);
}
+ return ret;
}
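
The hunk above fixes a print-buffer bug: the old code stepped the buffer backwards (buf - ret) and grew the remaining length (len + ret). A minimal illustration of the corrected accumulation idiom, mirroring the code above (sketch only):

	int pos = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);

	for (i = 0; i < sizeof(a->value); i++)
		pos += scnprintf(buf + pos, len - pos, "-%x", a->value[i]);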
/**
@@ -122,7 +132,7 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
* @name: ptr to bearer name string
* @name_parts: ptr to area for bearer name components (or NULL if not needed)
*
- * Returns 1 if bearer name is valid, otherwise 0.
+ * Return: 1 if bearer name is valid, otherwise 0.
*/
static int bearer_name_validate(const char *name,
struct tipc_bearer_names *name_parts)
@@ -134,10 +144,7 @@ static int bearer_name_validate(const char *name,
u32 if_len;
/* copy bearer name & ensure length is OK */
- name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
- /* need above in case non-Posix strncpy() doesn't pad with nulls */
- strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
- if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+ if (strscpy(name_copy, name, TIPC_MAX_BEARER_NAME) < 0)
return 0;
/* ensure all component parts of bearer name are present */
@@ -156,18 +163,24 @@ static int bearer_name_validate(const char *name,
/* return bearer name components, if necessary */
if (name_parts) {
- strcpy(name_parts->media_name, media_name);
- strcpy(name_parts->if_name, if_name);
+ if (strscpy(name_parts->media_name, media_name,
+ TIPC_MAX_MEDIA_NAME) < 0)
+ return 0;
+ if (strscpy(name_parts->if_name, if_name,
+ TIPC_MAX_IF_NAME) < 0)
+ return 0;
}
return 1;
}
/**
* tipc_bearer_find - locates bearer object with matching bearer name
+ * @net: the applicable net namespace
+ * @name: bearer name to locate
*/
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
u32 i;
@@ -202,160 +215,168 @@ int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id)
void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ b = bearer_get(net, bearer_id);
if (b)
- tipc_disc_add_dest(b->link_req);
+ tipc_disc_add_dest(b->disc);
rcu_read_unlock();
}
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+ b = bearer_get(net, bearer_id);
if (b)
- tipc_disc_remove_dest(b->link_req);
+ tipc_disc_remove_dest(b->disc);
rcu_read_unlock();
}
/**
* tipc_enable_bearer - enable bearer with the given name
+ * @net: the applicable net namespace
+ * @name: bearer name to enable
+ * @disc_domain: bearer domain
+ * @prio: bearer priority
+ * @attr: nlattr array
+ * @extack: netlink extended ack
*/
static int tipc_enable_bearer(struct net *net, const char *name,
- u32 disc_domain, u32 priority,
- struct nlattr *attr[])
+ u32 disc_domain, u32 prio,
+ struct nlattr *attr[],
+ struct netlink_ext_ack *extack)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_bearer_names b_names;
+ int with_this_prio = 1;
struct tipc_bearer *b;
struct tipc_media *m;
- struct tipc_bearer_names b_names;
struct sk_buff *skb;
- char addr_string[16];
- u32 bearer_id;
- u32 with_this_prio;
- u32 i;
+ int bearer_id = 0;
int res = -EINVAL;
+ char *errstr = "";
+ u32 i;
- if (!tn->own_addr) {
- pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
- name);
- return -ENOPROTOOPT;
- }
if (!bearer_name_validate(name, &b_names)) {
- pr_warn("Bearer <%s> rejected, illegal name\n", name);
- return -EINVAL;
- }
- if (tipc_addr_domain_valid(disc_domain) &&
- (disc_domain != tn->own_addr)) {
- if (tipc_in_scope(disc_domain, tn->own_addr)) {
- disc_domain = tn->own_addr & TIPC_ZONE_CLUSTER_MASK;
- res = 0; /* accept any node in own cluster */
- } else if (in_own_cluster_exact(net, disc_domain))
- res = 0; /* accept specified node in own cluster */
- }
- if (res) {
- pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
- name);
- return -EINVAL;
+ NL_SET_ERR_MSG(extack, "Illegal name");
+ return res;
}
- if ((priority > TIPC_MAX_LINK_PRI) &&
- (priority != TIPC_MEDIA_LINK_PRI)) {
- pr_warn("Bearer <%s> rejected, illegal priority\n", name);
- return -EINVAL;
+
+ if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) {
+ errstr = "illegal priority";
+ NL_SET_ERR_MSG(extack, "Illegal priority");
+ goto rejected;
}
m = tipc_media_find(b_names.media_name);
if (!m) {
- pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
- name, b_names.media_name);
- return -EINVAL;
+ errstr = "media not registered";
+ NL_SET_ERR_MSG(extack, "Media not registered");
+ goto rejected;
}
- if (priority == TIPC_MEDIA_LINK_PRI)
- priority = m->priority;
+ if (prio == TIPC_MEDIA_LINK_PRI)
+ prio = m->priority;
-restart:
+ /* Check new bearer vs existing ones and find free bearer id if any */
bearer_id = MAX_BEARERS;
- with_this_prio = 1;
- for (i = MAX_BEARERS; i-- != 0; ) {
+ i = MAX_BEARERS;
+ while (i-- != 0) {
b = rtnl_dereference(tn->bearer_list[i]);
if (!b) {
bearer_id = i;
continue;
}
if (!strcmp(name, b->name)) {
- pr_warn("Bearer <%s> rejected, already enabled\n",
- name);
- return -EINVAL;
+ errstr = "already enabled";
+ NL_SET_ERR_MSG(extack, "Already enabled");
+ goto rejected;
}
- if ((b->priority == priority) &&
+
+ if (b->priority == prio &&
(++with_this_prio > 2)) {
- if (priority-- == 0) {
- pr_warn("Bearer <%s> rejected, duplicate priority\n",
- name);
- return -EINVAL;
+ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n",
+ name, prio);
+
+ if (prio == TIPC_MIN_LINK_PRI) {
+ errstr = "cannot adjust to lower";
+ NL_SET_ERR_MSG(extack, "Cannot adjust to lower");
+ goto rejected;
}
- pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
- name, priority + 1, priority);
- goto restart;
+
+ pr_warn("Bearer <%s>: trying with adjusted priority\n",
+ name);
+ prio--;
+ bearer_id = MAX_BEARERS;
+ i = MAX_BEARERS;
+ with_this_prio = 1;
}
}
+
if (bearer_id >= MAX_BEARERS) {
- pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
- name, MAX_BEARERS);
- return -EINVAL;
+ errstr = "max 3 bearers permitted";
+ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted");
+ goto rejected;
}
b = kzalloc(sizeof(*b), GFP_ATOMIC);
if (!b)
return -ENOMEM;
- strcpy(b->name, name);
+ strscpy(b->name, name);
b->media = m;
res = m->enable_media(net, b, attr);
if (res) {
- pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
- name, -res);
- return -EINVAL;
+ kfree(b);
+ errstr = "failed to enable media";
+ NL_SET_ERR_MSG(extack, "Failed to enable media");
+ goto rejected;
}
b->identity = bearer_id;
b->tolerance = m->tolerance;
- b->window = m->window;
+ b->min_win = m->min_win;
+ b->max_win = m->max_win;
b->domain = disc_domain;
b->net_plane = bearer_id + 'A';
- b->priority = priority;
- test_and_set_bit_lock(0, &b->up);
+ b->priority = prio;
+ refcount_set(&b->refcnt, 1);
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
bearer_disable(net, b);
- pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
- name);
- return -EINVAL;
+ errstr = "failed to create discoverer";
+ NL_SET_ERR_MSG(extack, "Failed to create discoverer");
+ goto rejected;
}
+ /* Create monitoring data before accepting activate messages */
+ if (tipc_mon_create(net, bearer_id)) {
+ bearer_disable(net, b);
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ test_and_set_bit_lock(0, &b->up);
rcu_assign_pointer(tn->bearer_list[bearer_id], b);
if (skb)
tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr);
- if (tipc_mon_create(net, bearer_id))
- return -ENOMEM;
+ pr_info("Enabled bearer <%s>, priority %u\n", name, prio);
- pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
- name,
- tipc_addr_string_fill(addr_string, disc_domain), priority);
+ return res;
+rejected:
+ pr_warn("Enabling of bearer <%s> rejected, %s\n", name, errstr);
return res;
}
/**
* tipc_reset_bearer - Reset all links established over this bearer
+ * @net: the applicable net namespace
+ * @b: the target bearer
*/
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
{
@@ -365,32 +386,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
return 0;
}
-/* tipc_bearer_reset_all - reset all links on all bearers
- */
-void tipc_bearer_reset_all(struct net *net)
+bool tipc_bearer_hold(struct tipc_bearer *b)
{
- struct tipc_bearer *b;
- int i;
+ return (b && refcount_inc_not_zero(&b->refcnt));
+}
- for (i = 0; i < MAX_BEARERS; i++) {
- b = bearer_get(net, i);
- if (b)
- clear_bit_unlock(0, &b->up);
- }
- for (i = 0; i < MAX_BEARERS; i++) {
- b = bearer_get(net, i);
- if (b)
- tipc_reset_bearer(net, b);
- }
- for (i = 0; i < MAX_BEARERS; i++) {
- b = bearer_get(net, i);
- if (b)
- test_and_set_bit_lock(0, &b->up);
- }
+void tipc_bearer_put(struct tipc_bearer *b)
+{
+ if (b && refcount_dec_and_test(&b->refcnt))
+ kfree_rcu(b, rcu);
}
/**
- * bearer_disable
+ * bearer_disable - disable this bearer
+ * @net: the applicable net namespace
+ * @b: the bearer to disable
*
* Note: This routine assumes caller holds RTNL lock.
*/
@@ -404,41 +414,64 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
tipc_node_delete_links(net, bearer_id);
b->media->disable_media(b);
RCU_INIT_POINTER(b->media_ptr, NULL);
- if (b->link_req)
- tipc_disc_delete(b->link_req);
+ if (b->disc)
+ tipc_disc_delete(b->disc);
RCU_INIT_POINTER(tn->bearer_list[bearer_id], NULL);
- kfree_rcu(b, rcu);
+ tipc_bearer_put(b);
tipc_mon_delete(net, bearer_id);
}
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
struct nlattr *attr[])
{
+ char *dev_name = strchr((const char *)b->name, ':') + 1;
+ int hwaddr_len = b->media->hwaddr_len;
+ u8 node_id[NODE_ID_LEN] = {0,};
struct net_device *dev;
- char *driver_name = strchr((const char *)b->name, ':') + 1;
/* Find device with specified name */
- dev = dev_get_by_name(net, driver_name);
+ dev = dev_get_by_name(net, dev_name);
if (!dev)
return -ENODEV;
- if (tipc_mtu_bad(dev, 0)) {
+ if (tipc_mtu_bad(dev)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
+ if (dev == net->loopback_dev) {
dev_put(dev);
+ pr_info("Enabling <%s> not permitted\n", b->name);
+ return -EINVAL;
+ }
+
+ /* Autoconfigure own node identity if needed */
+ if (!tipc_own_id(net) && hwaddr_len <= NODE_ID_LEN) {
+ memcpy(node_id, dev->dev_addr, hwaddr_len);
+ tipc_net_init(net, node_id, 0);
+ }
+ if (!tipc_own_id(net)) {
+ dev_put(dev);
+ pr_warn("Failed to obtain node identity\n");
return -EINVAL;
}
/* Associate TIPC bearer with L2 bearer */
rcu_assign_pointer(b->media_ptr, dev);
+ b->pt.dev = dev;
+ b->pt.type = htons(ETH_P_TIPC);
+ b->pt.func = tipc_l2_rcv_msg;
+ dev_add_pack(&b->pt);
memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
- memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
+ memcpy(b->bcast_addr.value, dev->broadcast, hwaddr_len);
b->bcast_addr.media_id = b->media->type_id;
b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
b->mtu = dev->mtu;
- b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
+ b->media->raw2addr(b, &b->addr, (const char *)dev->dev_addr);
rcu_assign_pointer(dev->tipc_ptr, b);
return 0;
}
/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
+ * @b: the target bearer
*
* Mark L2 bearer as inactive so that incoming buffers are thrown away
*/
@@ -447,6 +480,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
struct net_device *dev;
dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+ dev_remove_pack(&b->pt);
RCU_INIT_POINTER(dev->tipc_ptr, NULL);
synchronize_net();
dev_put(dev);
@@ -454,6 +488,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
/**
* tipc_l2_send_msg - send a TIPC packet out over an L2 interface
+ * @net: the associated network namespace
* @skb: the packet to be sent
* @b: the bearer through which the packet is to be sent
* @dest: peer destination address
@@ -464,7 +499,7 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
struct net_device *dev;
int delta;
- dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
+ dev = (struct net_device *)rcu_dereference(b->media_ptr);
if (!dev)
return 0;
@@ -501,13 +536,26 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(tipc_net(net)->bearer_list[bearer_id]);
+ b = bearer_get(net, bearer_id);
if (b)
mtu = b->mtu;
rcu_read_unlock();
return mtu;
}
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
+{
+ int mtu = TIPC_MIN_BEARER_MTU;
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ mtu += b->encap_hlen;
+ rcu_read_unlock();
+ return mtu;
+}
+
/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
*/
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
@@ -519,10 +567,15 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
rcu_read_lock();
b = bearer_get(net, bearer_id);
- if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
- b->media->send_msg(net, skb, b, dest);
- else
+ if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dest, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dest);
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
}
@@ -530,7 +583,8 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
*/
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst)
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
{
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -544,10 +598,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
__skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq);
- if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
- b->media->send_msg(net, skb, b, dst);
- else
+ if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb)))) {
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
+ } else {
kfree_skb(skb);
+ }
}
rcu_read_unlock();
}
@@ -558,6 +617,7 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq)
{
struct tipc_net *tn = tipc_net(net);
+ struct tipc_media_addr *dst;
int net_id = tn->net_id;
struct tipc_bearer *b;
struct sk_buff *skb, *tmp;
@@ -572,14 +632,19 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
msg_set_non_seq(hdr, 1);
msg_set_mc_netid(hdr, net_id);
__skb_dequeue(xmitq);
- b->media->send_msg(net, skb, b, &b->bcast_addr);
+ dst = &b->bcast_addr;
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_xmit(net, &skb, b, dst, NULL);
+ if (skb)
+#endif
+ b->media->send_msg(net, skb, b, dst);
}
rcu_read_unlock();
}
/**
* tipc_l2_rcv_msg - handle incoming TIPC message from an interface
- * @buf: the received packet
+ * @skb: the received message
* @dev: the net device that the packet was received on
* @pt: the packet_type structure which was used to register this handler
* @orig_dev: the original receive net device in case the device is a bond
@@ -594,11 +659,13 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
struct tipc_bearer *b;
rcu_read_lock();
- b = rcu_dereference_rtnl(dev->tipc_ptr);
+ b = rcu_dereference(dev->tipc_ptr) ?:
+ rcu_dereference(orig_dev->tipc_ptr);
if (likely(b && test_bit(0, &b->up) &&
- (skb->pkt_type <= PACKET_BROADCAST))) {
- skb->next = NULL;
- tipc_rcv(dev_net(dev), skb, b);
+ (skb->pkt_type <= PACKET_MULTICAST))) {
+ skb_mark_not_on_list(skb);
+ TIPC_SKB_CB(skb)->flags = 0;
+ tipc_rcv(dev_net(b->pt.dev), skb, b);
rcu_read_unlock();
return NET_RX_SUCCESS;
}
@@ -627,19 +694,23 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (!b)
return NOTIFY_DONE;
+ trace_tipc_l2_device_event(dev, b, evt);
switch (evt) {
case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
+ if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+ test_and_set_bit_lock(0, &b->up);
break;
- case NETDEV_UP:
- test_and_set_bit_lock(0, &b->up);
- break;
+ }
+ fallthrough;
case NETDEV_GOING_DOWN:
clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
break;
+ case NETDEV_UP:
+ test_and_set_bit_lock(0, &b->up);
+ break;
case NETDEV_CHANGEMTU:
- if (tipc_mtu_bad(dev, 0)) {
+ if (tipc_mtu_bad(dev)) {
bearer_disable(net, b);
break;
}
@@ -648,22 +719,17 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
break;
case NETDEV_CHANGEADDR:
b->media->raw2addr(b, &b->addr,
- (char *)dev->dev_addr);
+ (const char *)dev->dev_addr);
tipc_reset_bearer(net, b);
break;
case NETDEV_UNREGISTER:
case NETDEV_CHANGENAME:
- bearer_disable(dev_net(dev), b);
+ bearer_disable(net, b);
break;
}
return NOTIFY_OK;
}
-static struct packet_type tipc_packet_type __read_mostly = {
- .type = htons(ETH_P_TIPC),
- .func = tipc_l2_rcv_msg,
-};
-
static struct notifier_block notifier = {
.notifier_call = tipc_l2_device_event,
.priority = 0,
@@ -671,24 +737,17 @@ static struct notifier_block notifier = {
int tipc_bearer_setup(void)
{
- int err;
-
- err = register_netdevice_notifier(&notifier);
- if (err)
- return err;
- dev_add_pack(&tipc_packet_type);
- return 0;
+ return register_netdevice_notifier(&notifier);
}
void tipc_bearer_cleanup(void)
{
unregister_netdevice_notifier(&notifier);
- dev_remove_pack(&tipc_packet_type);
}
void tipc_bearer_stop(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b;
u32 i;
@@ -701,6 +760,65 @@ void tipc_bearer_stop(struct net *net)
}
}
+void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts)
+{
+ struct net_device *dev = net->loopback_dev;
+ struct sk_buff *skb, *_skb;
+ int exp;
+
+ skb_queue_walk(pkts, _skb) {
+ skb = pskb_copy(_skb, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ exp = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
+ if (exp > 0 && pskb_expand_head(skb, exp, 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ skb_reset_network_header(skb);
+ dev_hard_header(skb, dev, ETH_P_TIPC, dev->dev_addr,
+ dev->dev_addr, skb->len);
+ skb->dev = dev;
+ skb->pkt_type = PACKET_HOST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ }
+}
+
+static int tipc_loopback_rcv_pkt(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *od)
+{
+ consume_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+int tipc_attach_loopback(struct net *net)
+{
+ struct net_device *dev = net->loopback_dev;
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!dev)
+ return -ENODEV;
+
+ netdev_hold(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL);
+ tn->loopback_pt.dev = dev;
+ tn->loopback_pt.type = htons(ETH_P_TIPC);
+ tn->loopback_pt.func = tipc_loopback_rcv_pkt;
+ dev_add_pack(&tn->loopback_pt);
+ return 0;
+}
+
+void tipc_detach_loopback(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+
+ dev_remove_pack(&tn->loopback_pt);
+ netdev_put(net->loopback_dev, &tn->loopback_pt.dev_tracker);
+}
+
/* Caller should hold rtnl_lock to protect the bearer */
static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
struct tipc_bearer *bearer, int nlflags)
@@ -714,22 +832,25 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER);
if (!attrs)
goto msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
goto attr_msg_full;
- prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP);
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_PROP);
if (!prop)
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
goto prop_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->max_win))
goto prop_msg_full;
+ if (bearer->media->type_id == TIPC_MEDIA_TYPE_UDP)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, bearer->mtu))
+ goto prop_msg_full;
nla_nest_end(msg->skb, prop);
@@ -762,7 +883,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_bearer *bearer;
struct tipc_nl_msg msg;
struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
if (i == MAX_BEARERS)
return 0;
@@ -800,9 +921,9 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
- info->attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
if (err)
return err;
@@ -822,6 +943,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
bearer = tipc_bearer_find(net, name);
if (!bearer) {
err = -EINVAL;
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
goto err_out;
}
@@ -838,7 +960,7 @@ err_out:
return err;
}
-int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
{
int err;
char *name;
@@ -849,9 +971,9 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
- info->attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
if (err)
return err;
@@ -860,38 +982,45 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
- rtnl_lock();
bearer = tipc_bearer_find(net, name);
if (!bearer) {
- rtnl_unlock();
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
return -EINVAL;
}
bearer_disable(net, bearer);
- rtnl_unlock();
return 0;
}
-int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_bearer_disable(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
{
int err;
char *bearer;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- u32 domain;
+ u32 domain = 0;
u32 prio;
prio = TIPC_MEDIA_LINK_PRI;
- domain = tn->own_addr & TIPC_ZONE_CLUSTER_MASK;
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
- info->attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
if (err)
return err;
@@ -915,15 +1044,19 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
}
+ return tipc_enable_bearer(net, bearer, domain, prio, attrs,
+ info->extack);
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
rtnl_lock();
- err = tipc_enable_bearer(net, bearer, domain, prio, attrs);
- if (err) {
- rtnl_unlock();
- return err;
- }
+ err = __tipc_nl_bearer_enable(skb, info);
rtnl_unlock();
- return 0;
+ return err;
}
int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
@@ -937,9 +1070,9 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
- info->attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
if (err)
return err;
@@ -950,39 +1083,43 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
- return -EINVAL;
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
+ err = -EINVAL;
+ goto out;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (attrs[TIPC_NLA_BEARER_UDP_OPTS]) {
+ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack, "UDP option is unsupported");
+ err = -EINVAL;
+ goto out;
+ }
+
err = tipc_udp_nl_bearer_add(b,
attrs[TIPC_NLA_BEARER_UDP_OPTS]);
- if (err) {
- rtnl_unlock();
- return err;
- }
}
#endif
+out:
rtnl_unlock();
- return 0;
+ return err;
}
-int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
{
- int err;
- char *name;
struct tipc_bearer *b;
struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
struct net *net = sock_net(skb->sk);
+ char *name;
+ int err;
if (!info->attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
- info->attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_BEARER_MAX,
+ info->attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, info->extack);
if (err)
return err;
@@ -990,10 +1127,9 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
- rtnl_lock();
b = tipc_bearer_find(net, name);
if (!b) {
- rtnl_unlock();
+ NL_SET_ERR_MSG(info->extack, "Bearer not found");
return -EINVAL;
}
@@ -1002,23 +1138,50 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
props);
- if (err) {
- rtnl_unlock();
+ if (err)
return err;
- }
- if (props[TIPC_NLA_PROP_TOL])
+ if (props[TIPC_NLA_PROP_TOL]) {
b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_TOL);
+ }
if (props[TIPC_NLA_PROP_PRIO])
b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
if (props[TIPC_NLA_PROP_WIN])
- b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU property is unsupported");
+ return -EINVAL;
+ }
+#ifdef CONFIG_TIPC_MEDIA_UDP
+ if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
+ b->encap_hlen + TIPC_MIN_BEARER_MTU) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU value is out-of-range");
+ return -EINVAL;
+ }
+ b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+ tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU);
+#endif
+ }
}
- rtnl_unlock();
return 0;
}
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_bearer_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
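
The handlers above are split into a lock-free __tipc_nl_*() worker and a thin wrapper that takes rtnl_lock(), so the worker can also be called from contexts that already hold the lock. A minimal sketch of the pattern, with illustrative names (not taken from the TIPC sources):

	static int __example_handler(struct sk_buff *skb, struct genl_info *info)
	{
		/* real work goes here; caller must hold rtnl_lock() */
		return 0;
	}

	static int example_handler(struct sk_buff *skb, struct genl_info *info)
	{
		int err;

		rtnl_lock();
		err = __example_handler(skb, info);
		rtnl_unlock();

		return err;
	}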
+
static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
struct tipc_media *media, int nlflags)
{
@@ -1031,22 +1194,25 @@ static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA);
if (!attrs)
goto msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
goto attr_msg_full;
- prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP);
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_MEDIA_PROP);
if (!prop)
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
goto prop_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
goto prop_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->max_win))
goto prop_msg_full;
+ if (media->type_id == TIPC_MEDIA_TYPE_UDP)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_MTU, media->mtu))
+ goto prop_msg_full;
nla_nest_end(msg->skb, prop);
nla_nest_end(msg->skb, attrs);
@@ -1097,14 +1263,14 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
struct tipc_nl_msg msg;
struct tipc_media *media;
struct sk_buff *rep;
- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
if (!info->attrs[TIPC_NLA_MEDIA])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
- info->attrs[TIPC_NLA_MEDIA],
- tipc_nl_media_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy, info->extack);
if (err)
return err;
@@ -1123,6 +1289,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
media = tipc_media_find(name);
if (!media) {
+ NL_SET_ERR_MSG(info->extack, "Media not found");
err = -EINVAL;
goto err_out;
}
@@ -1140,49 +1307,71 @@ err_out:
return err;
}
-int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
{
int err;
char *name;
struct tipc_media *m;
- struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+ struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];
if (!info->attrs[TIPC_NLA_MEDIA])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
- info->attrs[TIPC_NLA_MEDIA],
- tipc_nl_media_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MEDIA_MAX,
+ info->attrs[TIPC_NLA_MEDIA],
+ tipc_nl_media_policy, info->extack);
if (!attrs[TIPC_NLA_MEDIA_NAME])
return -EINVAL;
name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
- rtnl_lock();
m = tipc_media_find(name);
if (!m) {
- rtnl_unlock();
+ NL_SET_ERR_MSG(info->extack, "Media not found");
return -EINVAL;
}
-
if (attrs[TIPC_NLA_MEDIA_PROP]) {
struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
props);
- if (err) {
- rtnl_unlock();
+ if (err)
return err;
- }
if (props[TIPC_NLA_PROP_TOL])
m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
if (props[TIPC_NLA_PROP_PRIO])
m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
if (props[TIPC_NLA_PROP_WIN])
- m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (props[TIPC_NLA_PROP_MTU]) {
+ if (m->type_id != TIPC_MEDIA_TYPE_UDP) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU property is unsupported");
+ return -EINVAL;
+ }
+#ifdef CONFIG_TIPC_MEDIA_UDP
+			if (tipc_udp_mtu_bad(
+			    nla_get_u32(props[TIPC_NLA_PROP_MTU]))) {
+ NL_SET_ERR_MSG(info->extack,
+ "MTU value is out-of-range");
+ return -EINVAL;
+ }
+ m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]);
+#endif
+ }
}
- rtnl_unlock();
return 0;
}
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_media_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 635c9086e19a..41eac1ee0c09 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -93,7 +93,10 @@ struct tipc_bearer;
* @raw2addr: convert from raw addr format to media addr format
* @priority: default link (and bearer) priority
* @tolerance: default time (in ms) before declaring link failure
- * @window: default window (in packets) before declaring link congestion
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
+ * @mtu: max packet size bearer can support, for media types that do not
+ * depend on the underlying device MTU
* @type_id: TIPC media identifier
* @hwaddr_len: TIPC media address len
* @name: media name
@@ -114,10 +117,12 @@ struct tipc_media {
char *msg);
int (*raw2addr)(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *raw);
+ const char *raw);
u32 priority;
u32 tolerance;
- u32 window;
+ u32 min_win;
+ u32 max_win;
+ u32 mtu;
u32 type_id;
u32 hwaddr_len;
char name[TIPC_MAX_MEDIA_NAME];
@@ -131,35 +136,44 @@ struct tipc_media {
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
* @bcast_addr: media address used in broadcasting
+ * @pt: packet type for bearer
* @rcu: rcu struct for tipc_bearer
* @priority: default link priority for bearer
- * @window: default window size for bearer
+ * @min_win: minimum window (in packets) before declaring link congestion
+ * @max_win: maximum window (in packets) before declaring link congestion
* @tolerance: default link tolerance for bearer
* @domain: network domain to which links can be established
* @identity: array index of this bearer within TIPC bearer array
- * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @disc: ptr to link setup request
* @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @encap_hlen: encap headers length
+ * @up: bearer up flag (bit 0)
+ * @refcnt: tipc_bearer reference counter
*
* Note: media-specific code is responsible for initialization of the fields
* indicated below when a bearer is enabled; TIPC's generic bearer code takes
* care of initializing all other fields.
*/
struct tipc_bearer {
- void __rcu *media_ptr; /* initalized by media */
- u32 mtu; /* initalized by media */
- struct tipc_media_addr addr; /* initalized by media */
+ void __rcu *media_ptr; /* initialized by media */
+ u32 mtu; /* initialized by media */
+ struct tipc_media_addr addr; /* initialized by media */
char name[TIPC_MAX_BEARER_NAME];
struct tipc_media *media;
struct tipc_media_addr bcast_addr;
+ struct packet_type pt;
struct rcu_head rcu;
u32 priority;
- u32 window;
+ u32 min_win;
+ u32 max_win;
u32 tolerance;
u32 domain;
u32 identity;
- struct tipc_link_req *link_req;
+ struct tipc_discoverer *disc;
char net_plane;
+ u16 encap_hlen;
unsigned long up;
+ refcount_t refcnt;
};
struct tipc_bearer_names {
@@ -186,21 +200,25 @@ extern struct tipc_media udp_media_info;
#endif
int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
-int tipc_media_set_priority(const char *name, u32 new_value);
-int tipc_media_set_window(const char *name, u32 new_value);
-void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
struct nlattr *attrs[]);
+bool tipc_bearer_hold(struct tipc_bearer *b);
+void tipc_bearer_put(struct tipc_bearer *b);
void tipc_disable_l2_media(struct tipc_bearer *b);
int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b, struct tipc_media_addr *dest);
@@ -210,25 +228,36 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id);
struct tipc_media *tipc_media_find(const char *name);
-void tipc_bearer_reset_all(struct net *net);
int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,
struct tipc_media_addr *dest);
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq,
- struct tipc_media_addr *dst);
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
+void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts);
+int tipc_attach_loopback(struct net *net);
+void tipc_detach_loopback(struct net *net);
+
+static inline void tipc_loopback_trace(struct net *net,
+ struct sk_buff_head *pkts)
+{
+ if (unlikely(dev_nit_active(net->loopback_dev)))
+ tipc_clone_to_loopback(net, pkts);
+}
/* check if device MTU is too low for tipc headers */
-static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+static inline bool tipc_mtu_bad(struct net_device *dev)
{
- if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU)
return false;
netdev_warn(dev, "MTU too low for tipc bearer\n");
return true;
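
With the UDP encapsulation overhead now tracked separately in @encap_hlen, tipc_mtu_bad() compares the device MTU against the fixed TIPC_MIN_BEARER_MTU only. A hedged sketch of a call site (assumed for illustration; the actual notifier lives elsewhere in bearer.c), reacting to an MTU change on the underlying device:

	static void example_mtu_change(struct net *net, struct net_device *dev,
				       struct tipc_bearer *b)
	{
		if (tipc_mtu_bad(dev))
			bearer_disable(net, b);	/* assumed helper in bearer.c */
	}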
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 0b982d048fb9..434e70eabe08 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,8 +34,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include "core.h"
#include "name_table.h"
#include "subscr.h"
@@ -43,6 +41,8 @@
#include "net.h"
#include "socket.h"
#include "bcast.h"
+#include "node.h"
+#include "crypto.h"
#include <linux/module.h>
@@ -56,12 +56,23 @@ static int __net_init tipc_init_net(struct net *net)
int err;
tn->net_id = 4711;
- tn->own_addr = 0;
+ tn->node_addr = 0;
+ tn->trial_addr = 0;
+ tn->addr_trial_end = 0;
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ INIT_WORK(&tn->work, tipc_net_finalize_work);
+ memset(tn->node_id, 0, sizeof(tn->node_id));
+ memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
get_random_bytes(&tn->random, sizeof(int));
INIT_LIST_HEAD(&tn->node_list);
spin_lock_init(&tn->node_list_lock);
+#ifdef CONFIG_TIPC_CRYPTO
+ err = tipc_crypto_start(&tn->crypto_tx, net, NULL);
+ if (err)
+ goto out_crypto;
+#endif
err = tipc_sk_rht_init(net);
if (err)
goto out_sk_rht;
@@ -70,36 +81,56 @@ static int __net_init tipc_init_net(struct net *net)
if (err)
goto out_nametbl;
- INIT_LIST_HEAD(&tn->dist_queue);
- err = tipc_topsrv_start(net);
+ err = tipc_bcast_init(net);
if (err)
- goto out_subscr;
+ goto out_bclink;
- err = tipc_bcast_init(net);
+ err = tipc_attach_loopback(net);
if (err)
goto out_bclink;
return 0;
out_bclink:
- tipc_bcast_stop(net);
-out_subscr:
tipc_nametbl_stop(net);
out_nametbl:
tipc_sk_rht_destroy(net);
out_sk_rht:
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tn->crypto_tx);
+out_crypto:
+#endif
return err;
}
static void __net_exit tipc_exit_net(struct net *net)
{
- tipc_topsrv_stop(net);
+ struct tipc_net *tn = tipc_net(net);
+
+ tipc_detach_loopback(net);
tipc_net_stop(net);
+	/* Make sure tipc_net_finalize_work() has finished */
+ cancel_work_sync(&tn->work);
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&tipc_net(net)->crypto_tx);
+#endif
+ while (atomic_read(&tn->wq_count))
+ cond_resched();
+}
+
+static void __net_exit tipc_pernet_pre_exit(struct net *net)
+{
+ tipc_node_pre_cleanup_net(net);
}
+static struct pernet_operations tipc_pernet_pre_exit_ops = {
+ .pre_exit = tipc_pernet_pre_exit,
+};
+
static struct pernet_operations tipc_net_ops = {
.init = tipc_init_net,
.exit = tipc_exit_net,
@@ -107,6 +138,11 @@ static struct pernet_operations tipc_net_ops = {
.size = sizeof(struct tipc_net),
};
+static struct pernet_operations tipc_topsrv_net_ops = {
+ .init = tipc_topsrv_init_net,
+ .exit = tipc_topsrv_exit_net,
+};
+
static int __init tipc_init(void)
{
int err;
@@ -117,54 +153,69 @@ static int __init tipc_init(void)
sysctl_tipc_rmem[1] = RCVBUF_DEF;
sysctl_tipc_rmem[2] = RCVBUF_MAX;
- err = tipc_netlink_start();
+ err = tipc_register_sysctl();
if (err)
- goto out_netlink;
+ goto out_sysctl;
- err = tipc_netlink_compat_start();
+ err = register_pernet_device(&tipc_net_ops);
if (err)
- goto out_netlink_compat;
+ goto out_pernet;
err = tipc_socket_init();
if (err)
goto out_socket;
- err = tipc_register_sysctl();
+ err = register_pernet_device(&tipc_topsrv_net_ops);
if (err)
- goto out_sysctl;
+ goto out_pernet_topsrv;
- err = register_pernet_subsys(&tipc_net_ops);
+ err = register_pernet_subsys(&tipc_pernet_pre_exit_ops);
if (err)
- goto out_pernet;
+ goto out_register_pernet_subsys;
err = tipc_bearer_setup();
if (err)
goto out_bearer;
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_netlink_compat_start();
+ if (err)
+ goto out_netlink_compat;
+
pr_info("Started in single node mode\n");
return 0;
+
+out_netlink_compat:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_bearer_cleanup();
out_bearer:
- unregister_pernet_subsys(&tipc_net_ops);
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+out_register_pernet_subsys:
+ unregister_pernet_device(&tipc_topsrv_net_ops);
+out_pernet_topsrv:
+ tipc_socket_stop();
+out_socket:
+ unregister_pernet_device(&tipc_net_ops);
out_pernet:
tipc_unregister_sysctl();
out_sysctl:
- tipc_socket_stop();
-out_socket:
- tipc_netlink_compat_stop();
-out_netlink_compat:
- tipc_netlink_stop();
-out_netlink:
pr_err("Unable to start in single node mode\n");
return err;
}
static void __exit tipc_exit(void)
{
- tipc_bearer_cleanup();
- unregister_pernet_subsys(&tipc_net_ops);
- tipc_netlink_stop();
tipc_netlink_compat_stop();
+ tipc_netlink_stop();
+ tipc_bearer_cleanup();
+ unregister_pernet_subsys(&tipc_pernet_pre_exit_ops);
+ unregister_pernet_device(&tipc_topsrv_net_ops);
tipc_socket_stop();
+ unregister_pernet_device(&tipc_net_ops);
tipc_unregister_sysctl();
pr_info("Deactivated\n");
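
Note that the error unwind labels in tipc_init() now mirror the registration order exactly in reverse, and tipc_exit() tears everything down in the same reverse order. The invariant, summarized:

	/* init:  sysctl -> pernet(net) -> socket -> pernet(topsrv)
	 *        -> pernet_subsys(pre_exit) -> bearer -> netlink -> compat
	 * exit:  compat -> netlink -> bearer -> pernet_subsys(pre_exit)
	 *        -> pernet(topsrv) -> socket -> pernet(net) -> sysctl
	 */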
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5cc5398be722..7f3fe3401c45 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -1,8 +1,9 @@
/*
* net/tipc/core.h: Include file for TIPC global declarations
*
- * Copyright (c) 2005-2006, 2013 Ericsson AB
+ * Copyright (c) 2005-2006, 2013-2018 Ericsson AB
* Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,7 +50,6 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
-#include <asm/hardirq.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/list.h>
@@ -59,29 +59,46 @@
#include <linux/etherdevice.h>
#include <net/netns/generic.h>
#include <linux/rhashtable.h>
+#include <net/genetlink.h>
+#include <net/netns/hash.h>
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
struct tipc_node;
struct tipc_bearer;
struct tipc_bc_base;
struct tipc_link;
-struct tipc_name_table;
-struct tipc_server;
+struct tipc_topsrv;
struct tipc_monitor;
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto;
+#endif
#define TIPC_MOD_VER "2.0.0"
#define NODE_HTABLE_SIZE 512
#define MAX_BEARERS 3
#define TIPC_DEF_MON_THRESHOLD 32
+#define NODE_ID_LEN 16
+#define NODE_ID_STR_LEN (NODE_ID_LEN * 2 + 1)
extern unsigned int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
extern int sysctl_tipc_named_timeout __read_mostly;
struct tipc_net {
- u32 own_addr;
+ u8 node_id[NODE_ID_LEN];
+ u32 node_addr;
+ u32 trial_addr;
+ unsigned long addr_trial_end;
+ char node_id_string[NODE_ID_STR_LEN];
int net_id;
int random;
+ bool legacy_addr_format;
/* Node table and node list */
spinlock_t node_list_lock;
@@ -109,12 +126,24 @@ struct tipc_net {
spinlock_t nametbl_lock;
struct name_table *nametbl;
- /* Name dist queue */
- struct list_head dist_queue;
-
/* Topology subscription server */
- struct tipc_server *topsrv;
+ struct tipc_topsrv *topsrv;
atomic_t subscription_count;
+
+ /* Cluster capabilities */
+ u16 capabilities;
+
+ /* Tracing of node internal messages */
+ struct packet_type loopback_pt;
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* TX crypto handler */
+ struct tipc_crypto *crypto_tx;
+#endif
+ /* Work item for net finalize */
+ struct work_struct work;
+	/* The number of work items currently scheduled */
+ atomic_t wq_count;
};
static inline struct tipc_net *tipc_net(struct net *net)
@@ -132,6 +161,16 @@ static inline struct list_head *tipc_nodes(struct net *net)
return &tipc_net(net)->node_list;
}
+static inline struct name_table *tipc_name_table(struct net *net)
+{
+ return tipc_net(net)->nametbl;
+}
+
+static inline struct tipc_topsrv *tipc_topsrv(struct net *net)
+{
+ return tipc_net(net)->topsrv;
+}
+
static inline unsigned int tipc_hashfn(u32 addr)
{
return addr & (NODE_HTABLE_SIZE - 1);
@@ -157,11 +196,27 @@ static inline int less(u16 left, u16 right)
return less_eq(left, right) && (mod(right) != mod(left));
}
-static inline int in_range(u16 val, u16 min, u16 max)
+static inline int tipc_in_range(u16 val, u16 min, u16 max)
{
return !less(val, min) && !more(val, max);
}
+static inline u32 tipc_net_hash_mixes(struct net *net, int tn_rand)
+{
+ return net_hash_mix(&init_net) ^ net_hash_mix(net) ^ tn_rand;
+}
+
+static inline u32 hash128to32(char *bytes)
+{
+ __be32 *tmp = (__be32 *)bytes;
+ u32 res;
+
+ res = ntohl(tmp[0] ^ tmp[1] ^ tmp[2] ^ tmp[3]);
+ if (likely(res))
+ return res;
+ return ntohl(tmp[0] | tmp[1] | tmp[2] | tmp[3]);
+}
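
hash128to32() folds the 128-bit node id into a non-zero 32-bit value: the four big-endian words are xor-ed, and only if that collapses to zero does it fall back to their bitwise OR. A worked example:

	/* id bytes  : 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10
	 * be32 words: 0x01020304, 0x05060708, 0x090a0b0c, 0x0d0e0f10
	 * xor       : 0x01020304 ^ 0x05060708 ^ 0x090a0b0c ^ 0x0d0e0f10
	 *           = 0x00000010  (non-zero, returned as-is)
	 * A zero xor would instead return the OR of the words, so a non-zero
	 * node id never maps to address 0.
	 */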
+
#ifdef CONFIG_SYSCTL
int tipc_register_sysctl(void);
void tipc_unregister_sysctl(void);
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
new file mode 100644
index 000000000000..751904f10aab
--- /dev/null
+++ b/net/tipc/crypto.c
@@ -0,0 +1,2484 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/rng.h>
+#include "crypto.h"
+#include "msg.h"
+#include "bcast.h"
+
+#define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */
+#define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */
+#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */
+
+#define TIPC_MAX_TFMS_DEF 10
+#define TIPC_MAX_TFMS_LIM 1000
+
+#define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */
+
+/*
+ * TIPC Key ids
+ */
+enum {
+ KEY_MASTER = 0,
+ KEY_MIN = KEY_MASTER,
+ KEY_1 = 1,
+ KEY_2,
+ KEY_3,
+ KEY_MAX = KEY_3,
+};
+
+/*
+ * TIPC Crypto statistics
+ */
+enum {
+ STAT_OK,
+ STAT_NOK,
+ STAT_ASYNC,
+ STAT_ASYNC_OK,
+ STAT_ASYNC_NOK,
+ STAT_BADKEYS, /* tx only */
+ STAT_BADMSGS = STAT_BADKEYS, /* rx only */
+ STAT_NOKEYS,
+ STAT_SWITCHES,
+
+ MAX_STATS,
+};
+
+/* TIPC crypto statistics' header */
+static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
+ "async_nok", "badmsgs", "nokeys",
+ "switches"};
+
+/* Max TFMs number per key */
+int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+/* Key exchange switch, default: on */
+int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
+
+/*
+ * struct tipc_key - TIPC keys' status indicator
+ *
+ * 7 6 5 4 3 2 1 0
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * key: | (reserved)|passive idx| active idx|pending idx|
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+struct tipc_key {
+#define KEY_BITS (2)
+#define KEY_MASK ((1 << KEY_BITS) - 1)
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 pending:2,
+ active:2,
+ passive:2, /* rx only */
+ reserved:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u8 reserved:2,
+ passive:2, /* rx only */
+ active:2,
+ pending:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ } __packed;
+ u8 keys;
+ };
+};
+
+/**
+ * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
+ * @tfm: cipher handle/key
+ * @list: linked list of TFMs
+ */
+struct tipc_tfm {
+ struct crypto_aead *tfm;
+ struct list_head list;
+};
+
+/**
+ * struct tipc_aead - TIPC AEAD key structure
+ * @tfm_entry: per-cpu pointer to one entry in TFM list
+ * @crypto: the TIPC crypto that owns this key
+ * @cloned: reference to the source key in case of cloning
+ * @users: the number of the key users (TX/RX)
+ * @salt: the key's SALT value
+ * @authsize: authentication tag size (max = 16)
+ * @mode: the crypto mode applied to the key
+ * @hint: a hint for the user key
+ * @rcu: struct rcu_head
+ * @key: the aead key
+ * @gen: the key's generation
+ * @seqno: the key seqno (cluster scope)
+ * @refcnt: the key reference counter
+ */
+struct tipc_aead {
+#define TIPC_AEAD_HINT_LEN (5)
+ struct tipc_tfm * __percpu *tfm_entry;
+ struct tipc_crypto *crypto;
+ struct tipc_aead *cloned;
+ atomic_t users;
+ u32 salt;
+ u8 authsize;
+ u8 mode;
+ char hint[2 * TIPC_AEAD_HINT_LEN + 1];
+ struct rcu_head rcu;
+ struct tipc_aead_key *key;
+ u16 gen;
+
+ atomic64_t seqno ____cacheline_aligned;
+ refcount_t refcnt ____cacheline_aligned;
+
+} ____cacheline_aligned;
+
+/**
+ * struct tipc_crypto_stats - TIPC Crypto statistics
+ * @stat: array of crypto statistics
+ */
+struct tipc_crypto_stats {
+ unsigned int stat[MAX_STATS];
+};
+
+/**
+ * struct tipc_crypto - TIPC TX/RX crypto structure
+ * @net: struct net
+ * @node: TIPC node (RX)
+ * @aead: array of pointers to AEAD keys for encryption/decryption
+ * @peer_rx_active: replicated peer RX active key index
+ * @key_gen: TX/RX key generation
+ * @key: the key states
+ * @skey_mode: session key's mode
+ * @skey: received session key
+ * @wq: common workqueue on TX crypto
+ * @work: delayed work sched for TX/RX
+ * @key_distr: key distributing state
+ * @rekeying_intv: rekeying interval (in minutes)
+ * @stats: the crypto statistics
+ * @name: the crypto name
+ * @sndnxt: the per-peer sndnxt (TX)
+ * @timer1: general timer 1 (jiffies)
+ * @timer2: general timer 2 (jiffies)
+ * @working: indicates whether the crypto is working or not
+ * @key_master: flag indicating whether a master key exists
+ * @legacy_user: flag indicating a peer joined w/o master key (for bwd comp.)
+ * @nokey: no key indication
+ * @flags: combined flags field
+ * @lock: tipc_key lock
+ */
+struct tipc_crypto {
+ struct net *net;
+ struct tipc_node *node;
+ struct tipc_aead __rcu *aead[KEY_MAX + 1];
+ atomic_t peer_rx_active;
+ u16 key_gen;
+ struct tipc_key key;
+ u8 skey_mode;
+ struct tipc_aead_key *skey;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+#define KEY_DISTR_SCHED 1
+#define KEY_DISTR_COMPL 2
+ atomic_t key_distr;
+ u32 rekeying_intv;
+
+ struct tipc_crypto_stats __percpu *stats;
+ char name[48];
+
+ atomic64_t sndnxt ____cacheline_aligned;
+ unsigned long timer1;
+ unsigned long timer2;
+ union {
+ struct {
+ u8 working:1;
+ u8 key_master:1;
+ u8 legacy_user:1;
+			u8 nokey:1;
+ };
+ u8 flags;
+ };
+ spinlock_t lock; /* crypto lock */
+
+} ____cacheline_aligned;
+
+/* struct tipc_crypto_tx_ctx - TX context for callbacks */
+struct tipc_crypto_tx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+ struct tipc_media_addr dst;
+};
+
+/* struct tipc_crypto_rx_ctx - RX context for callbacks */
+struct tipc_crypto_rx_ctx {
+ struct tipc_aead *aead;
+ struct tipc_bearer *bearer;
+};
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
+static inline void tipc_aead_put(struct tipc_aead *aead);
+static void tipc_aead_free(struct rcu_head *rp);
+static int tipc_aead_users(struct tipc_aead __rcu *aead);
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode);
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg);
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+static void tipc_aead_encrypt_done(void *data, int err);
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b);
+static void tipc_aead_decrypt_done(void *data, int err);
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx);
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending);
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos,
+ bool master_key);
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb,
+ u8 tx_key);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type);
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err);
+static void tipc_crypto_do_cmd(struct net *net, int cmd);
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf);
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode);
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
+static void tipc_crypto_work_tx(struct work_struct *work);
+static void tipc_crypto_work_rx(struct work_struct *work);
+static int tipc_aead_key_generate(struct tipc_aead_key *skey);
+
+#define is_tx(crypto) (!(crypto)->node)
+#define is_rx(crypto) (!is_tx(crypto))
+
+#define key_next(cur) ((cur) % KEY_MAX + 1)
+
+#define tipc_aead_rcu_ptr(rcu_ptr, lock) \
+ rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))
+
+#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
+do { \
+ struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \
+ lockdep_is_held(lock)); \
+ rcu_assign_pointer((rcu_ptr), (ptr)); \
+ tipc_aead_put(__tmp); \
+} while (0)
+
+#define tipc_crypto_key_detach(rcu_ptr, lock) \
+ tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
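
tipc_aead_rcu_replace() is the usual RCU publish-then-release swap: the old key pointer is fetched under the crypto lock, the new one is published with rcu_assign_pointer(), and the old key is dropped with tipc_aead_put(), which defers the actual free to a grace period via call_rcu(). A usage sketch (mirroring what tipc_crypto_key_attach() does below):

	spin_lock_bh(&c->lock);
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	spin_unlock_bh(&c->lock);
	/* Readers inside rcu_read_lock() keep using the old key; it is only
	 * freed by tipc_aead_free() once they are all done.
	 */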
+
+/**
+ * tipc_aead_key_validate - Validate an AEAD user key
+ * @ukey: pointer to user key data
+ * @info: netlink info pointer
+ */
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
+{
+ int keylen;
+
+ /* Check if algorithm exists */
+ if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
+		GENL_SET_ERR_MSG(info, "unable to load the algorithm (module missing?)");
+ return -ENODEV;
+ }
+
+ /* Currently, we only support the "gcm(aes)" cipher algorithm */
+ if (strcmp(ukey->alg_name, "gcm(aes)")) {
+		GENL_SET_ERR_MSG(info, "the algorithm is not supported yet");
+ return -ENOTSUPP;
+ }
+
+ /* Check if key size is correct */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+ if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
+ keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
+ GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
+ return -EKEYREJECTED;
+ }
+
+ return 0;
+}
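
The accepted lengths follow from the key layout [AES key][4-byte salt]: 16 + 4 = 20, 24 + 4 = 28 and 32 + 4 = 36 octets. A sketch building a minimal AES-128 user key that passes this check (illustrative helper with random placeholder key material, not part of the TIPC sources):

	static struct tipc_aead_key *example_make_ukey(void)
	{
		struct tipc_aead_key *ukey;

		ukey = kzalloc(struct_size(ukey, key, 20), GFP_KERNEL);
		if (!ukey)
			return NULL;
		strscpy(ukey->alg_name, "gcm(aes)", sizeof(ukey->alg_name));
		ukey->keylen = 20;	/* AES-128 key (16) + salt (4) */
		get_random_bytes(ukey->key, ukey->keylen);
		return ukey;
	}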
+
+/**
+ * tipc_aead_key_generate - Generate new session key
+ * @skey: input/output key with new content
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_key_generate(struct tipc_aead_key *skey)
+{
+ int rc = 0;
+
+ /* Fill the key's content with a random value via RNG cipher */
+ rc = crypto_get_default_rng();
+ if (likely(!rc)) {
+ rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
+ skey->keylen);
+ crypto_put_default_rng();
+ }
+
+ return rc;
+}
+
+static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
+ tmp = NULL;
+ rcu_read_unlock();
+
+ return tmp;
+}
+
+static inline void tipc_aead_put(struct tipc_aead *aead)
+{
+ if (aead && refcount_dec_and_test(&aead->refcnt))
+ call_rcu(&aead->rcu, tipc_aead_free);
+}
+
+/**
+ * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
+ * @rp: rcu head pointer
+ */
+static void tipc_aead_free(struct rcu_head *rp)
+{
+ struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
+ struct tipc_tfm *tfm_entry, *head, *tmp;
+
+ if (aead->cloned) {
+ tipc_aead_put(aead->cloned);
+ } else {
+ head = *get_cpu_ptr(aead->tfm_entry);
+ put_cpu_ptr(aead->tfm_entry);
+ list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
+ crypto_free_aead(tfm_entry->tfm);
+ list_del(&tfm_entry->list);
+ kfree(tfm_entry);
+ }
+ /* Free the head */
+ crypto_free_aead(head->tfm);
+ list_del(&head->list);
+ kfree(head);
+ }
+ free_percpu(aead->tfm_entry);
+ kfree_sensitive(aead->key);
+ kfree_sensitive(aead);
+}
+
+static int tipc_aead_users(struct tipc_aead __rcu *aead)
+{
+ struct tipc_aead *tmp;
+ int users = 0;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ users = atomic_read(&tmp->users);
+ rcu_read_unlock();
+
+ return users;
+}
+
+static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+ atomic_add_unless(&tmp->users, 1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
+{
+ struct tipc_aead *tmp;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp)
+		atomic_add_unless(&tmp->users, -1, lim);
+ rcu_read_unlock();
+}
+
+static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
+{
+ struct tipc_aead *tmp;
+ int cur;
+
+ rcu_read_lock();
+ tmp = rcu_dereference(aead);
+ if (tmp) {
+ do {
+ cur = atomic_read(&tmp->users);
+ if (cur == val)
+ break;
+ } while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
+ * @aead: the AEAD key pointer
+ */
+static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
+{
+ struct tipc_tfm **tfm_entry;
+ struct crypto_aead *tfm;
+
+ tfm_entry = get_cpu_ptr(aead->tfm_entry);
+ *tfm_entry = list_next_entry(*tfm_entry, list);
+ tfm = (*tfm_entry)->tfm;
+ put_cpu_ptr(tfm_entry);
+
+ return tfm;
+}
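
Note that the TFM list is circular through real entries only: the first entry's list head starts out pointing at itself and the others are added behind it, so there is no sentinel node and list_next_entry() simply advances the per-CPU cursor one step around the ring:

	/* With three TFMs:  head -> tfm1 -> tfm2 -> head -> ...
	 * Each CPU keeps its own cursor (aead->tfm_entry), so concurrent
	 * senders rotate over different crypto_aead handles instead of
	 * serializing on a single one.
	 */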
+
+/**
+ * tipc_aead_init - Initiate TIPC AEAD
+ * @aead: returned new TIPC AEAD key handle pointer
+ * @ukey: pointer to user key data
+ * @mode: the key mode
+ *
+ * Allocate a list of new cipher transformations (TFMs) with the specified
+ * user key data if it is valid. The number of TFMs allocated can be tuned
+ * beforehand via the sysctl "net/tipc/max_tfms".
+ * All the other AEAD data are initialized as well.
+ *
+ * Return: 0 if the initiation is successful, otherwise: < 0
+ */
+static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
+ u8 mode)
+{
+ struct tipc_tfm *tfm_entry, *head;
+ struct crypto_aead *tfm;
+ struct tipc_aead *tmp;
+ int keylen, err, cpu;
+ int tfm_cnt = 0;
+
+ if (unlikely(*aead))
+ return -EEXIST;
+
+ /* Allocate a new AEAD */
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (unlikely(!tmp))
+ return -ENOMEM;
+
+ /* The key consists of two parts: [AES-KEY][SALT] */
+ keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
+
+ /* Allocate per-cpu TFM entry pointer */
+ tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
+ if (!tmp->tfm_entry) {
+ kfree_sensitive(tmp);
+ return -ENOMEM;
+ }
+
+ /* Make a list of TFMs with the user key data */
+ do {
+ tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
+ if (IS_ERR(tfm)) {
+ err = PTR_ERR(tfm);
+ break;
+ }
+
+ if (unlikely(!tfm_cnt &&
+ crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
+ crypto_free_aead(tfm);
+ err = -ENOTSUPP;
+ break;
+ }
+
+ err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
+ err |= crypto_aead_setkey(tfm, ukey->key, keylen);
+ if (unlikely(err)) {
+ crypto_free_aead(tfm);
+ break;
+ }
+
+ tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
+ if (unlikely(!tfm_entry)) {
+ crypto_free_aead(tfm);
+ err = -ENOMEM;
+ break;
+ }
+ INIT_LIST_HEAD(&tfm_entry->list);
+ tfm_entry->tfm = tfm;
+
+ /* First entry? */
+ if (!tfm_cnt) {
+ head = tfm_entry;
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(tmp->tfm_entry, cpu) = head;
+ }
+ } else {
+ list_add_tail(&tfm_entry->list, &head->list);
+ }
+
+ } while (++tfm_cnt < sysctl_tipc_max_tfms);
+
+	/* No TFM allocated at all? */
+ if (!tfm_cnt) {
+ free_percpu(tmp->tfm_entry);
+ kfree_sensitive(tmp);
+ return err;
+ }
+
+	/* Form a hex string of the last few bytes as the key's hint */
+ bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
+ TIPC_AEAD_HINT_LEN);
+
+ /* Initialize the other data */
+ tmp->mode = mode;
+ tmp->cloned = NULL;
+ tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
+ if (!tmp->key) {
+ tipc_aead_free(&tmp->rcu);
+ return -ENOMEM;
+ }
+ memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
+ atomic_set(&tmp->users, 0);
+ atomic64_set(&tmp->seqno, 0);
+ refcount_set(&tmp->refcnt, 1);
+
+ *aead = tmp;
+ return 0;
+}
+
+/**
+ * tipc_aead_clone - Clone a TIPC AEAD key
+ * @dst: dest key for the cloning
+ * @src: source key to clone from
+ *
+ * Make a "copy" of the source AEAD key data to the dest; the TFM list is
+ * shared between the two keys.
+ * A reference to the source is held in the "cloned" pointer for later
+ * freeing.
+ *
+ * Note: this must be done in cluster-key mode only!
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
+{
+ struct tipc_aead *aead;
+ int cpu;
+
+ if (!src)
+ return -ENOKEY;
+
+ if (src->mode != CLUSTER_KEY)
+ return -EINVAL;
+
+ if (unlikely(*dst))
+ return -EEXIST;
+
+ aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
+ if (unlikely(!aead))
+ return -ENOMEM;
+
+ aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
+ if (unlikely(!aead->tfm_entry)) {
+ kfree_sensitive(aead);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ *per_cpu_ptr(aead->tfm_entry, cpu) =
+ *per_cpu_ptr(src->tfm_entry, cpu);
+ }
+
+ memcpy(aead->hint, src->hint, sizeof(src->hint));
+ aead->mode = src->mode;
+ aead->salt = src->salt;
+ aead->authsize = src->authsize;
+ atomic_set(&aead->users, 0);
+ atomic64_set(&aead->seqno, 0);
+ refcount_set(&aead->refcnt, 1);
+
+ WARN_ON(!refcount_inc_not_zero(&src->refcnt));
+ aead->cloned = src;
+
+ *dst = aead;
+ return 0;
+}
+
+/**
+ * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
+ * @tfm: cipher handle to be registered with the request
+ * @crypto_ctx_size: size of crypto context for callback
+ * @iv: returned pointer to IV data
+ * @req: returned pointer to AEAD request data
+ * @sg: returned pointer to SG lists
+ * @nsg: number of SG lists to be allocated
+ *
+ * Allocate memory to store the crypto context data, AEAD request, IV and SG
+ * lists, the memory layout is as follows:
+ * crypto_ctx || iv || aead_req || sg[]
+ *
+ * Return: the pointer to the memory areas in case of success, otherwise NULL
+ */
+static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
+ unsigned int crypto_ctx_size,
+ u8 **iv, struct aead_request **req,
+ struct scatterlist **sg, int nsg)
+{
+ unsigned int iv_size, req_size;
+ unsigned int len;
+ u8 *mem;
+
+ iv_size = crypto_aead_ivsize(tfm);
+ req_size = sizeof(**req) + crypto_aead_reqsize(tfm);
+
+ len = crypto_ctx_size;
+ len += iv_size;
+ len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += req_size;
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += nsg * sizeof(**sg);
+
+ mem = kmalloc(len, GFP_ATOMIC);
+ if (!mem)
+ return NULL;
+
+ *iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
+ crypto_aead_alignmask(tfm) + 1);
+ *req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
+ crypto_tfm_ctx_alignment());
+ *sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
+ __alignof__(struct scatterlist));
+
+ return (void *)mem;
+}
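
One allocation serves the whole async operation; a worked layout example (sizes are illustrative):

	/* crypto_ctx_size = 24, ivsize = 12, nsg = 2:
	 *
	 *   mem --+-- crypto_ctx  [24 bytes, callback context]
	 *         +-- iv          [12 bytes, aligned to the aead alignmask]
	 *         +-- aead_req    [req_size, aligned to crypto_tfm_ctx_alignment()]
	 *         +-- sg[2]       [aligned to __alignof__(struct scatterlist)]
	 *
	 * A single kfree(ctx) in the completion path releases everything.
	 */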
+
+/**
+ * tipc_aead_encrypt - Encrypt a message
+ * @aead: TIPC AEAD key for the message encryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message will be delivered after the encryption
+ * @dst: the destination media address
+ * @__dnode: TIPC dest node if "known"
+ *
+ * Return:
+ * * 0 : if the encryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the encryption has failed
+ */
+static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
+ struct tipc_crypto_tx_ctx *tx_ctx;
+ struct aead_request *req;
+ struct sk_buff *trailer;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, len, tailen, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+	/* Make sure the message length is at least 4-byte aligned */
+ len = ALIGN(skb->len, 4);
+ tailen = len - skb->len + aead->authsize;
+
+	/* Expand the skb tail for the authentication tag:
+	 * For simplicity, we have made sure the skb gets enough tailroom for
+	 * the authentication tag at skb allocation time. Even a nonlinear skb
+	 * without a frag_list should still be fine!
+	 * Otherwise, we must cow it into a writable buffer with the tailroom.
+	 */
+ SKB_LINEAR_ASSERT(skb);
+ if (tailen > skb_tailroom(skb)) {
+ pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
+ }
+
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+
+ pskb_put(skb, trailer, tailen);
+
+ /* Allocate memory for the AEAD operation */
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
+ * In case we're in cluster-key mode, SALT is varied by xor-ing with
+ * the source address (or w0 of id), otherwise with the dest address
+ * if dest is known.
+ */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= __be32_to_cpu(ehdr->addr);
+ else if (__dnode)
+ salt ^= tipc_node_get_addr(__dnode);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_encrypt_done, skb);
+ tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
+ tx_ctx->aead = aead;
+ tx_ctx->bearer = b;
+ memcpy(&tx_ctx->dst, dst, sizeof(*dst));
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+	/* Hold net to avoid a freed tipc_crypto on namespace deletion */
+ if (!maybe_get_net(aead->crypto->net)) {
+ tipc_bearer_put(b);
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do encrypt */
+ rc = crypto_aead_encrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+ put_net(aead->crypto->net);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
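
The 12-byte AES-GCM nonce is thus SALT(4) || SEQNO(8), with the salt perturbed per peer. A worked example (cluster-key mode, illustrative numbers):

	/* salt = 0xa1b2c3d4, ehdr->addr = 0x00001001, seqno = 42:
	 *
	 *   salt ^= 0x00001001   ->  0xa1b2d3d5
	 *   iv[0..3]  = salt     (per-peer variation of the nonce)
	 *   iv[4..11] = seqno    (0x000000000000002a, big endian)
	 *
	 * Combined with the monotonically increasing seqno this keeps the
	 * nonce unique per (key, peer, message), as GCM strictly requires.
	 */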
+
+static void tipc_aead_encrypt_done(void *data, int err)
+{
+ struct sk_buff *skb = data;
+ struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = tx_ctx->bearer;
+ struct tipc_aead *aead = tx_ctx->aead;
+ struct tipc_crypto *tx = aead->crypto;
+ struct net *net = tx->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
+ rcu_read_lock();
+ if (likely(test_bit(0, &b->up)))
+ b->media->send_msg(net, skb, b, &tx_ctx->dst);
+ else
+ kfree_skb(skb);
+ rcu_read_unlock();
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
+ kfree_skb(skb);
+ break;
+ }
+
+ kfree(tx_ctx);
+ tipc_bearer_put(b);
+ tipc_aead_put(aead);
+ put_net(net);
+}
+
+/**
+ * tipc_aead_decrypt - Decrypt an encrypted message
+ * @net: struct net
+ * @aead: TIPC AEAD for the message decryption
+ * @skb: the input/output skb
+ * @b: TIPC bearer where the message has been received
+ *
+ * Return:
+ * * 0 : if the decryption has completed
+ * * -EINPROGRESS/-EBUSY : if a callback will be performed
+ * * < 0 : the decryption has failed
+ */
+static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
+ struct sk_buff *skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto_rx_ctx *rx_ctx;
+ struct aead_request *req;
+ struct crypto_aead *tfm;
+ struct sk_buff *unused;
+ struct scatterlist *sg;
+ struct tipc_ehdr *ehdr;
+ int ehsz, nsg, rc;
+ void *ctx;
+ u32 salt;
+ u8 *iv;
+
+ if (unlikely(!aead))
+ return -ENOKEY;
+
+ nsg = skb_cow_data(skb, 0, &unused);
+ if (unlikely(nsg < 0)) {
+ pr_err("RX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
+ }
+
+ /* Allocate memory for the AEAD operation */
+ tfm = tipc_aead_tfm_next(aead);
+ ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
+ if (unlikely(!ctx))
+ return -ENOMEM;
+ TIPC_SKB_CB(skb)->crypto_ctx = ctx;
+
+ /* Map skb to the sg lists */
+ sg_init_table(sg, nsg);
+ rc = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(rc < 0)) {
+ pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
+ goto exit;
+ }
+
+ /* Reconstruct IV: */
+ ehdr = (struct tipc_ehdr *)skb->data;
+ salt = aead->salt;
+ if (aead->mode == CLUSTER_KEY)
+ salt ^= __be32_to_cpu(ehdr->addr);
+ else if (ehdr->destined)
+ salt ^= tipc_own_addr(net);
+ memcpy(iv, &salt, 4);
+ memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);
+
+ /* Prepare request */
+ ehsz = tipc_ehdr_size(ehdr);
+ aead_request_set_tfm(req, tfm);
+ aead_request_set_ad(req, ehsz);
+ aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);
+
+ /* Set callback function & data */
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ tipc_aead_decrypt_done, skb);
+ rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
+ rx_ctx->aead = aead;
+ rx_ctx->bearer = b;
+
+ /* Hold bearer */
+ if (unlikely(!tipc_bearer_hold(b))) {
+ rc = -ENODEV;
+ goto exit;
+ }
+
+ /* Now, do decrypt */
+ rc = crypto_aead_decrypt(req);
+ if (rc == -EINPROGRESS || rc == -EBUSY)
+ return rc;
+
+ tipc_bearer_put(b);
+
+exit:
+ kfree(ctx);
+ TIPC_SKB_CB(skb)->crypto_ctx = NULL;
+ return rc;
+}
+
+static void tipc_aead_decrypt_done(void *data, int err)
+{
+ struct sk_buff *skb = data;
+ struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
+ struct tipc_bearer *b = rx_ctx->bearer;
+ struct tipc_aead *aead = rx_ctx->aead;
+ struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
+ struct net *net = aead->crypto->net;
+
+ switch (err) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
+ break;
+ case -EINPROGRESS:
+ return;
+ default:
+ this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
+ break;
+ }
+
+ kfree(rx_ctx);
+ tipc_crypto_rcv_complete(net, aead, b, &skb, err);
+ if (likely(skb)) {
+ if (likely(test_bit(0, &b->up)))
+ tipc_rcv(net, skb, b);
+ else
+ kfree_skb(skb);
+ }
+
+ tipc_bearer_put(b);
+}
+
+static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
+{
+ return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+}
+
+/**
+ * tipc_ehdr_validate - Validate an encryption message
+ * @skb: the message buffer
+ *
+ * Return: "true" if this is a valid encryption message, otherwise "false"
+ */
+bool tipc_ehdr_validate(struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr;
+ int ehsz;
+
+ if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
+ return false;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (unlikely(ehdr->version != TIPC_EVERSION))
+ return false;
+ ehsz = tipc_ehdr_size(ehdr);
+ if (unlikely(!pskb_may_pull(skb, ehsz)))
+ return false;
+ if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
+ return false;
+
+ return true;
+}
+
+/**
+ * tipc_ehdr_build - Build TIPC encryption message header
+ * @net: struct net
+ * @aead: TX AEAD key to be used for the message encryption
+ * @tx_key: key id used for the message encryption
+ * @skb: input/output message skb
+ * @__rx: RX crypto handle if dest is "known"
+ *
+ * Return: the header size if the building is successful, otherwise < 0
+ */
+static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
+ u8 tx_key, struct sk_buff *skb,
+ struct tipc_crypto *__rx)
+{
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_ehdr *ehdr;
+ u32 user = msg_user(hdr);
+ u64 seqno;
+ int ehsz;
+
+ /* Make room for encryption header */
+ ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
+ WARN_ON(skb_headroom(skb) < ehsz);
+ ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);
+
+	/* Obtain a seqno first:
+	 * Use the key seqno (= cluster-wise) if the dest is unknown or we're
+	 * in cluster key mode, otherwise a per-peer seqno is preferable!
+	 */
+ if (!__rx || aead->mode == CLUSTER_KEY)
+ seqno = atomic64_inc_return(&aead->seqno);
+ else
+ seqno = atomic64_inc_return(&__rx->sndnxt);
+
+ /* Revoke the key if seqno is wrapped around */
+ if (unlikely(!seqno))
+ return tipc_crypto_key_revoke(net, tx_key);
+
+ /* Word 1-2 */
+ ehdr->seqno = cpu_to_be64(seqno);
+
+ /* Words 0, 3- */
+ ehdr->version = TIPC_EVERSION;
+ ehdr->user = 0;
+ ehdr->keepalive = 0;
+ ehdr->tx_key = tx_key;
+ ehdr->destined = (__rx) ? 1 : 0;
+ ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
+ ehdr->master_key = aead->crypto->key_master;
+ ehdr->reserved_1 = 0;
+ ehdr->reserved_2 = 0;
+
+ switch (user) {
+ case LINK_CONFIG:
+ ehdr->user = LINK_CONFIG;
+ memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
+ break;
+ default:
+ if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
+ ehdr->user = LINK_PROTOCOL;
+ ehdr->keepalive = msg_is_keepalive(hdr);
+ }
+ ehdr->addr = hdr->hdr[3];
+ break;
+ }
+
+ return ehsz;
+}
+
+static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
+ u8 new_passive,
+ u8 new_active,
+ u8 new_pending)
+{
+ struct tipc_key old = c->key;
+ char buf[32];
+
+ c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
+ ((new_active & KEY_MASK) << (KEY_BITS)) |
+ ((new_pending & KEY_MASK));
+
+ pr_debug("%s: key changing %s ::%pS\n", c->name,
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
+}
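
The three 2-bit indices pack into a single byte, matching the diagram above struct tipc_key. A worked example:

	/* new_passive = 3, new_active = 2, new_pending = 1:
	 *
	 *   (3 << 4) | (2 << 2) | 1  =  0x39  =  0b00111001
	 *
	 * bits 5-4 = passive idx, bits 3-2 = active idx, bits 1-0 = pending
	 */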
+
+/**
+ * tipc_crypto_key_init - Initiate a new user / AEAD key
+ * @c: TIPC crypto to which new key is attached
+ * @ukey: the user key
+ * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ * @master_key: specify this is a cluster master key
+ *
+ * A new TIPC AEAD key will be allocated and initiated with the specified user
+ * key, then attached to the TIPC crypto.
+ *
+ * Return: new key id in case of success, otherwise: < 0
+ */
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode, bool master_key)
+{
+ struct tipc_aead *aead = NULL;
+ int rc = 0;
+
+ /* Initiate with the new user key */
+ rc = tipc_aead_init(&aead, ukey, mode);
+
+ /* Attach it to the crypto */
+ if (likely(!rc)) {
+ rc = tipc_crypto_key_attach(c, aead, 0, master_key);
+ if (rc < 0)
+ tipc_aead_free(&aead->rcu);
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
+ * @c: TIPC crypto to which the new AEAD key is attached
+ * @aead: the new AEAD key pointer
+ * @pos: desired slot in the crypto key array, = 0 if any!
+ * @master_key: specify this is a cluster master key
+ *
+ * Return: new key id in case of success, otherwise: -EBUSY
+ */
+static int tipc_crypto_key_attach(struct tipc_crypto *c,
+ struct tipc_aead *aead, u8 pos,
+ bool master_key)
+{
+ struct tipc_key key;
+ int rc = -EBUSY;
+ u8 new_key;
+
+ spin_lock_bh(&c->lock);
+ key = c->key;
+ if (master_key) {
+ new_key = KEY_MASTER;
+ goto attach;
+ }
+ if (key.active && key.passive)
+ goto exit;
+ if (key.pending) {
+ if (tipc_aead_users(c->aead[key.pending]) > 0)
+ goto exit;
+ /* if (pos): ok with replacing, will be aligned when needed */
+ /* Replace it */
+ new_key = key.pending;
+ } else {
+ if (pos) {
+ if (key.active && pos != key_next(key.active)) {
+ key.passive = pos;
+ new_key = pos;
+ goto attach;
+ } else if (!key.active && !key.passive) {
+ key.pending = pos;
+ new_key = pos;
+ goto attach;
+ }
+ }
+ key.pending = key_next(key.active ?: key.passive);
+ new_key = key.pending;
+ }
+
+attach:
+ aead->crypto = c;
+ aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
+ tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
+ if (likely(c->key.keys != key.keys))
+ tipc_crypto_key_set_state(c, key.passive, key.active,
+ key.pending);
+ c->working = 1;
+ c->nokey = 0;
+ c->key_master |= master_key;
+ rc = new_key;
+
+exit:
+ spin_unlock_bh(&c->lock);
+ return rc;
+}
+
+void tipc_crypto_key_flush(struct tipc_crypto *c)
+{
+ struct tipc_crypto *tx, *rx;
+ int k;
+
+ spin_lock_bh(&c->lock);
+ if (is_rx(c)) {
+ /* Try to cancel pending work */
+ rx = c;
+ tx = tipc_net(rx->net)->crypto_tx;
+ if (cancel_delayed_work(&rx->work)) {
+ kfree(rx->skey);
+ rx->skey = NULL;
+ atomic_xchg(&rx->key_distr, 0);
+ tipc_node_put(rx->node);
+ }
+ /* RX stopping => decrease TX key users if any */
+ k = atomic_xchg(&rx->peer_rx_active, 0);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ c->flags = 0;
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_crypto_key_detach(c->aead[k], &c->lock);
+ atomic64_set(&c->sndnxt, 0);
+ spin_unlock_bh(&c->lock);
+}
+
+/**
+ * tipc_crypto_key_try_align - Align RX keys if possible
+ * @rx: RX crypto handle
+ * @new_pending: new pending slot if aligned (= TX key from peer)
+ *
+ * The peer has used an unknown key slot; this only happens when the peer has
+ * left and rejoined, or we are a newcomer.
+ * That means there must be no active key but a pending key at an unaligned
+ * slot. If so, we try to move the pending key to the new slot.
+ * Note: A potential passive key can exist, it will be shifted correspondingly!
+ *
+ * Return: "true" if key is successfully aligned, otherwise "false"
+ */
+static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
+{
+ struct tipc_aead *tmp1, *tmp2 = NULL;
+ struct tipc_key key;
+ bool aligned = false;
+ u8 new_passive = 0;
+ int x;
+
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (key.pending == new_pending) {
+ aligned = true;
+ goto exit;
+ }
+ if (key.active)
+ goto exit;
+ if (!key.pending)
+ goto exit;
+ if (tipc_aead_users(rx->aead[key.pending]) > 0)
+ goto exit;
+
+ /* Try to "isolate" this pending key first */
+ tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
+ if (!refcount_dec_if_one(&tmp1->refcnt))
+ goto exit;
+ rcu_assign_pointer(rx->aead[key.pending], NULL);
+
+ /* Move passive key if any */
+ if (key.passive) {
+ tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock));
+ x = (key.passive - key.pending + new_pending) % KEY_MAX;
+ new_passive = (x <= 0) ? x + KEY_MAX : x;
+ }
+
+ /* Re-allocate the key(s) */
+ tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
+ rcu_assign_pointer(rx->aead[new_pending], tmp1);
+ if (new_passive)
+ rcu_assign_pointer(rx->aead[new_passive], tmp2);
+ refcount_set(&tmp1->refcnt, 1);
+ aligned = true;
+ pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
+ new_pending);
+
+exit:
+ spin_unlock(&rx->lock);
+ return aligned;
+}
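
The passive-slot shift above preserves the relative distance between the
pending and passive slots when the pending key moves; the (x <= 0) fix-up
compensates for C's negative remainder. A worked userspace example, assuming
KEY_MAX == 3:

#include <stdio.h>

#define KEY_MAX 3	/* assumed: three session-key slots */

static int shift_passive(int passive, int pending, int new_pending)
{
	int x = (passive - pending + new_pending) % KEY_MAX;

	return (x <= 0) ? x + KEY_MAX : x;	/* fold back into 1..3 */
}

int main(void)
{
	/* pending 3 -> 2 drags passive 1 -> 3: the distance of 1 is kept */
	printf("%d\n", shift_passive(1, 3, 2));		/* prints 3 */
	/* pending 2 -> 1 drags passive 3 -> 2 */
	printf("%d\n", shift_passive(3, 2, 1));		/* prints 2 */
	return 0;
}
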
+
+/**
+ * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
+ * @tx: TX crypto handle
+ * @rx: RX crypto handle (can be NULL)
+ * @skb: the message skb which will be decrypted later
+ * @tx_key: peer TX key id
+ *
+ * This function looks up the existing TX keys and picks one that is suitable
+ * for the message decryption: it must be a cluster key and must not have been
+ * used before on the same message (i.e. not recursively).
+ *
+ * Return: the TX AEAD key handle in case of success, otherwise NULL
+ */
+static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
+ struct tipc_crypto *rx,
+ struct sk_buff *skb,
+ u8 tx_key)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key = tx->key;
+ u8 k, i = 0;
+
+ /* Initialize data if not yet */
+ if (!skb_cb->tx_clone_deferred) {
+ skb_cb->tx_clone_deferred = 1;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ }
+
+ skb_cb->tx_clone_ctx.rx = rx;
+ if (++skb_cb->tx_clone_ctx.recurs > 2)
+ return NULL;
+
+ /* Pick one TX key */
+ spin_lock(&tx->lock);
+ if (tx_key == KEY_MASTER) {
+ aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
+ goto done;
+ }
+ do {
+ k = (i == 0) ? key.pending :
+ ((i == 1) ? key.active : key.passive);
+ if (!k)
+ continue;
+ aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
+ if (!aead)
+ continue;
+ if (aead->mode != CLUSTER_KEY ||
+ aead == skb_cb->tx_clone_ctx.last) {
+ aead = NULL;
+ continue;
+ }
+ /* Ok, found one cluster key */
+ skb_cb->tx_clone_ctx.last = aead;
+ WARN_ON(skb->next);
+ skb->next = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb->next))
+ pr_warn("Failed to clone skb for next round if any\n");
+ break;
+ } while (++i < 3);
+
+done:
+ if (likely(aead))
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
+ spin_unlock(&tx->lock);
+
+ return aead;
+}
+
+/**
+ * tipc_crypto_key_synch - Sync own key data according to peer key status
+ * @rx: RX crypto handle
+ * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
+ *
+ * This function updates the peer-node related data when the peer RX active
+ * key has changed, increasing or decreasing the users of the corresponding
+ * TX keys on this node accordingly.
+ *
+ * It also checks whether the peer has no key: in that case this node's master
+ * key (if any) takes over, i.e. the grace period is started, and the key
+ * distribution process is triggered as well.
+ *
+ * The "per-peer" sndnxt is also reset when the peer key has switched.
+ */
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
+{
+ struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 self = tipc_own_addr(rx->net);
+ u8 cur, new;
+ unsigned long delay;
+
+ /* Update RX 'key_master' flag according to peer, also mark "legacy" if
+ * a peer has no master key.
+ */
+ rx->key_master = ehdr->master_key;
+ if (!rx->key_master)
+ tx->legacy_user = 1;
+
+ /* For later cases, apply only if message is destined to this node */
+ if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
+ return;
+
+ /* Case 1: Peer has no keys, let's make master key take over */
+ if (ehdr->rx_nokey) {
+ /* Set or extend grace period */
+ tx->timer2 = jiffies;
+ /* Schedule key distributing for the peer if not yet */
+ if (tx->key.keys &&
+ !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
+ get_random_bytes(&delay, 2);
+ delay %= 5;
+ delay = msecs_to_jiffies(500 * ++delay);
+ if (queue_delayed_work(tx->wq, &rx->work, delay))
+ tipc_node_get(rx->node);
+ }
+ } else {
+ /* Cancel a pending key distributing if any */
+ atomic_xchg(&rx->key_distr, 0);
+ }
+
+ /* Case 2: Peer RX active key has changed, let's update own TX users */
+ cur = atomic_read(&rx->peer_rx_active);
+ new = ehdr->rx_key_active;
+ if (tx->key.keys &&
+ cur != new &&
+ atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
+ if (new)
+ tipc_aead_users_inc(tx->aead[new], INT_MAX);
+ if (cur)
+ tipc_aead_users_dec(tx->aead[cur], 0);
+
+ atomic64_set(&rx->sndnxt, 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+
+ pr_debug("%s: key users changed %d-- %d++, peer %s\n",
+ tx->name, cur, new, rx->name);
+ }
+}
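
The randomized backoff above spreads key distribution to peers over
0.5 s..2.5 s in 500 ms steps (two random bytes, reduced modulo 5, then
scaled). A userspace model of the computation:

#include <stdio.h>

/* mirrors: delay %= 5; delay = msecs_to_jiffies(500 * ++delay); */
static unsigned int distr_delay_ms(unsigned int rnd)
{
	unsigned int delay = rnd % 5;	/* 0..4 */

	return 500 * ++delay;		/* 500, 1000, ..., 2500 ms */
}

int main(void)
{
	unsigned int r;

	for (r = 0; r < 5; r++)
		printf("rnd %u -> %u ms\n", r, distr_delay_ms(r));
	return 0;
}
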
+
+static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_key key;
+
+ spin_lock_bh(&tx->lock);
+ key = tx->key;
+ WARN_ON(!key.active || tx_key != key.active);
+
+ /* Free the active key */
+ tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ spin_unlock_bh(&tx->lock);
+
+ pr_warn("%s: key is revoked\n", tx->name);
+ return -EKEYREVOKED;
+}
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node)
+{
+ struct tipc_crypto *c;
+
+ if (*crypto)
+ return -EEXIST;
+
+ /* Allocate crypto */
+ c = kzalloc(sizeof(*c), GFP_ATOMIC);
+ if (!c)
+ return -ENOMEM;
+
+ /* Allocate workqueue on TX */
+ if (!node) {
+ c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
+ if (!c->wq) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ }
+
+ /* Allocate statistic structure */
+ c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
+ if (!c->stats) {
+ if (c->wq)
+ destroy_workqueue(c->wq);
+ kfree_sensitive(c);
+ return -ENOMEM;
+ }
+
+ c->flags = 0;
+ c->net = net;
+ c->node = node;
+ get_random_bytes(&c->key_gen, 2);
+ tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->key_distr, 0);
+ atomic_set(&c->peer_rx_active, 0);
+ atomic64_set(&c->sndnxt, 0);
+ c->timer1 = jiffies;
+ c->timer2 = jiffies;
+ c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
+ spin_lock_init(&c->lock);
+ scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
+ (is_rx(c)) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net));
+
+ if (is_rx(c))
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
+ else
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
+
+ *crypto = c;
+ return 0;
+}
+
+void tipc_crypto_stop(struct tipc_crypto **crypto)
+{
+ struct tipc_crypto *c = *crypto;
+ u8 k;
+
+ if (!c)
+ return;
+
+ /* Flush any queued works & destroy wq */
+ if (is_tx(c)) {
+ c->rekeying_intv = 0;
+ cancel_delayed_work_sync(&c->work);
+ destroy_workqueue(c->wq);
+ }
+
+ /* Release AEAD keys */
+ rcu_read_lock();
+ for (k = KEY_MIN; k <= KEY_MAX; k++)
+ tipc_aead_put(rcu_dereference(c->aead[k]));
+ rcu_read_unlock();
+ pr_debug("%s: has been stopped\n", c->name);
+
+ /* Free this crypto statistics */
+ free_percpu(c->stats);
+
+ *crypto = NULL;
+ kfree_sensitive(c);
+}
+
+void tipc_crypto_timeout(struct tipc_crypto *rx)
+{
+ struct tipc_net *tn = tipc_net(rx->net);
+ struct tipc_crypto *tx = tn->crypto_tx;
+ struct tipc_key key;
+ int cmd;
+
+ /* TX pending: taking all users & stable -> active */
+ spin_lock(&tx->lock);
+ key = tx->key;
+ if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
+ goto s1;
+ if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
+ goto s1;
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
+ goto s1;
+
+ tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
+ if (key.active)
+ tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
+ this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
+ pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
+
+s1:
+ spin_unlock(&tx->lock);
+
+ /* RX pending: having user -> active */
+ spin_lock(&rx->lock);
+ key = rx->key;
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
+ goto s2;
+
+ if (key.active)
+ key.passive = key.active;
+ key.active = key.pending;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
+ pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
+ goto s5;
+
+s2:
+ /* RX pending: not working -> remove */
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
+ goto s3;
+
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
+ pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
+ goto s5;
+
+s3:
+ /* RX active: timed out or no user -> pending */
+ if (!key.active)
+ goto s4;
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.active]) > 0)
+ goto s4;
+
+ if (key.pending)
+ key.passive = key.active;
+ else
+ key.pending = key.active;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
+ tipc_aead_users_set(rx->aead[key.pending], 0);
+ pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
+ goto s5;
+
+s4:
+ /* RX passive: outdated or not working -> free */
+ if (!key.passive)
+ goto s5;
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.passive]) > -10)
+ goto s5;
+
+ tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
+ tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
+ pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);
+
+s5:
+ spin_unlock(&rx->lock);
+
+	/* Relax it here; the flag will be set again if it really is needed,
+	 * but only when we are not in the grace period, for safety!
+ */
+ if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
+ tx->legacy_user = 0;
+
+ /* Limit max_tfms & do debug commands if needed */
+ if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
+ return;
+
+ cmd = sysctl_tipc_max_tfms;
+ sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
+ tipc_crypto_do_cmd(rx->net, cmd);
+}
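
As a reading aid, the timeout rules above can be restated as a small decision
table; this is an illustrative userspace restatement, not kernel code:

#include <stdio.h>

struct key_rule {
	const char *state;
	const char *condition;
	const char *action;
};

static const struct key_rule rules[] = {
	{ "TX pending", "no active users, stable long enough", "promote to active" },
	{ "RX pending", "has at least one user",               "promote to active" },
	{ "RX pending", "not working (users <= -10)",          "remove" },
	{ "RX active",  "timed out or lost all users",         "demote to pending (re-probe)" },
	{ "RX passive", "outdated or not working",             "free" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		printf("%-10s | %-36s | %s\n", rules[i].state,
		       rules[i].condition, rules[i].action);
	return 0;
}
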
+
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type)
+{
+ struct sk_buff *skb;
+
+ skb = skb_clone(_skb, GFP_ATOMIC);
+ if (skb) {
+ TIPC_SKB_CB(skb)->xmit_type = type;
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+ b->media->send_msg(net, skb, b, dst);
+ }
+}
+
+/**
+ * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
+ * @net: struct net
+ * @skb: input/output message skb pointer
+ * @b: bearer used for xmit later
+ * @dst: destination media address
+ * @__dnode: destination node for reference if any
+ *
+ * First, build an encryption message header on the top of the message, then
+ * encrypt the original TIPC message by using the pending, master or active
+ * key with this preference order.
+ * If the encryption is successful, the encrypted skb is returned directly or
+ * via the callback.
+ * Otherwise, the skb is freed!
+ *
+ * Return:
+ * * 0 : the encryption has succeeded (or no encryption)
+ * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
+ *  * -ENOKEY              : the encryption has failed due to no key
+ * * -EKEYREVOKED : the encryption has failed due to key revoked
+ * * -ENOMEM : the encryption has failed due to no memory
+ * * < 0 : the encryption has failed due to other reasons
+ */
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode)
+{
+ struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_msg *hdr = buf_msg(*skb);
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead = NULL;
+ u32 user = msg_user(hdr);
+ u32 type = msg_type(hdr);
+ int rc = -ENOKEY;
+ u8 tx_key = 0;
+
+ /* No encryption? */
+ if (!tx->working)
+ return 0;
+
+ /* Pending key if peer has active on it or probing time */
+ if (unlikely(key.pending)) {
+ tx_key = key.pending;
+ if (!tx->key_master && !key.active)
+ goto encrypt;
+ if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
+ pr_debug("%s: probing for key[%d]\n", tx->name,
+ key.pending);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG || user == LINK_PROTOCOL)
+ tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
+ SKB_PROBING);
+ }
+
+ /* Master key if this is a *vital* message or in grace period */
+ if (tx->key_master) {
+ tx_key = KEY_MASTER;
+ if (!key.active)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
+ pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
+ user, type);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG ||
+ (user == LINK_PROTOCOL && type == RESET_MSG) ||
+ (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
+ time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
+ if (__rx && __rx->key_master &&
+ !atomic_read(&__rx->peer_rx_active))
+ goto encrypt;
+ if (!__rx) {
+ if (likely(!tx->legacy_user))
+ goto encrypt;
+ tipc_crypto_clone_msg(net, *skb, b, dst,
+ __dnode, SKB_GRACING);
+ }
+ }
+ }
+
+ /* Else, use the active key if any */
+ if (likely(key.active)) {
+ tx_key = key.active;
+ goto encrypt;
+ }
+
+ goto exit;
+
+encrypt:
+ aead = tipc_aead_get(tx->aead[tx_key]);
+ if (unlikely(!aead))
+ goto exit;
+ rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
+ if (likely(rc > 0))
+ rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);
+
+exit:
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY)
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ else if (rc == -EKEYREVOKED)
+ this_cpu_inc(stats->stat[STAT_BADKEYS]);
+ kfree_skb(*skb);
+ *skb = NULL;
+ break;
+ }
+
+ tipc_aead_put(aead);
+ return rc;
+}
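
The preference order described in the kernel-doc (pending, then master, then
active) can be approximated by a userspace model. This is a simplification: it
ignores the per-message cloning, and the master key also wins for vital
messages or during the grace period even when an active key exists.

#include <stdio.h>

static const char *pick_tx_key(int pending, int master, int active,
			       int peer_on_pending, int probing)
{
	if (pending && !master && !active)
		return "pending";
	if (pending && (peer_on_pending || probing))
		return "pending";
	if (master && !active)
		return "master";
	if (active)
		return "active";
	return "none (-ENOKEY)";
}

int main(void)
{
	printf("%s\n", pick_tx_key(1, 1, 1, 0, 1));	/* pending (probe) */
	printf("%s\n", pick_tx_key(0, 1, 0, 0, 0));	/* master */
	printf("%s\n", pick_tx_key(0, 0, 1, 0, 0));	/* active */
	return 0;
}
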
+
+/**
+ * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
+ * @net: struct net
+ * @rx: RX crypto handle
+ * @skb: input/output message skb pointer
+ * @b: bearer where the message has been received
+ *
+ * If the decryption is successful, the decrypted skb is returned directly or
+ * via the callback; the encryption header and auth tag are trimmed off
+ * before the skb is forwarded to tipc_rcv() via tipc_crypto_rcv_complete().
+ * Otherwise, the skb is freed!
+ * Note: RX key(s) can be re-aligned, or in case no key is suitable, TX
+ * cluster key(s) can be used for the decryption (recursively).
+ *
+ * Return:
+ * * 0 : the decryption has successfully completed
+ * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
+ * * -ENOKEY : the decryption has failed due to no key
+ * * -EBADMSG : the decryption has failed due to bad message
+ * * -ENOMEM : the decryption has failed due to no memory
+ * * < 0 : the decryption has failed due to other reasons
+ */
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b)
+{
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
+ struct tipc_crypto_stats __percpu *stats;
+ struct tipc_aead *aead = NULL;
+ struct tipc_key key;
+ int rc = -ENOKEY;
+ u8 tx_key, n;
+
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+
+ /* New peer?
+ * Let's try with TX key (i.e. cluster mode) & verify the skb first!
+ */
+ if (unlikely(!rx || tx_key == KEY_MASTER))
+ goto pick_tx;
+
+ /* Pick RX key according to TX key if any */
+ key = rx->key;
+ if (tx_key == key.active || tx_key == key.pending ||
+ tx_key == key.passive)
+ goto decrypt;
+
+ /* Unknown key, let's try to align RX key(s) */
+ if (tipc_crypto_key_try_align(rx, tx_key))
+ goto decrypt;
+
+pick_tx:
+ /* No key suitable? Try to pick one from TX... */
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
+ if (aead)
+ goto decrypt;
+ goto exit;
+
+decrypt:
+ rcu_read_lock();
+ if (!aead)
+ aead = tipc_aead_get(rx->aead[tx_key]);
+ rc = tipc_aead_decrypt(net, aead, *skb, b);
+ rcu_read_unlock();
+
+exit:
+ stats = ((rx) ?: tx)->stats;
+ switch (rc) {
+ case 0:
+ this_cpu_inc(stats->stat[STAT_OK]);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ this_cpu_inc(stats->stat[STAT_ASYNC]);
+ *skb = NULL;
+ return rc;
+ default:
+ this_cpu_inc(stats->stat[STAT_NOK]);
+ if (rc == -ENOKEY) {
+ kfree_skb(*skb);
+ *skb = NULL;
+ if (rx) {
+			/* Mark rx->nokey only if we don't have a
+			 * pending received session key, nor a newer
+			 * one, i.e. in the next slot.
+ */
+ n = key_next(tx_key);
+ rx->nokey = !(rx->skey ||
+ rcu_access_pointer(rx->aead[n]));
+ pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
+ rx->name, rx->nokey,
+ tx_key, rx->key.keys);
+ tipc_node_put(rx->node);
+ }
+ this_cpu_inc(stats->stat[STAT_NOKEYS]);
+ return rc;
+ } else if (rc == -EBADMSG) {
+ this_cpu_inc(stats->stat[STAT_BADMSGS]);
+ }
+ break;
+ }
+
+ tipc_crypto_rcv_complete(net, aead, b, skb, rc);
+ return rc;
+}
+
+static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
+ struct tipc_bearer *b,
+ struct sk_buff **skb, int err)
+{
+ struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
+ struct tipc_crypto *rx = aead->crypto;
+ struct tipc_aead *tmp = NULL;
+ struct tipc_ehdr *ehdr;
+ struct tipc_node *n;
+
+ /* Is this completed by TX? */
+ if (unlikely(is_tx(aead->crypto))) {
+ rx = skb_cb->tx_clone_ctx.rx;
+ pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
+ if (unlikely(err)) {
+ if (err == -EBADMSG && (*skb)->next)
+ tipc_rcv(net, (*skb)->next, b);
+ goto free_skb;
+ }
+
+ if (likely((*skb)->next)) {
+ kfree_skb((*skb)->next);
+ (*skb)->next = NULL;
+ }
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+ if (!rx) {
+ WARN_ON(ehdr->user != LINK_CONFIG);
+ n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
+ true);
+ rx = tipc_node_crypto_rx(n);
+ if (unlikely(!rx))
+ goto free_skb;
+ }
+
+ /* Ignore cloning if it was TX master key */
+ if (ehdr->tx_key == KEY_MASTER)
+ goto rcv;
+ if (tipc_aead_clone(&tmp, aead) < 0)
+ goto rcv;
+ WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
+ tipc_aead_free(&tmp->rcu);
+ goto rcv;
+ }
+ tipc_aead_put(aead);
+ aead = tmp;
+ }
+
+ if (unlikely(err)) {
+ tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
+ goto free_skb;
+ }
+
+ /* Set the RX key's user */
+ tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);
+
+ /* Mark this point, RX works */
+ rx->timer1 = jiffies;
+
+rcv:
+ /* Remove ehdr & auth. tag prior to tipc_rcv() */
+ ehdr = (struct tipc_ehdr *)(*skb)->data;
+
+ /* Mark this point, RX passive still works */
+ if (rx->key.passive && ehdr->tx_key == rx->key.passive)
+ rx->timer2 = jiffies;
+
+ skb_reset_network_header(*skb);
+ skb_pull(*skb, tipc_ehdr_size(ehdr));
+ if (pskb_trim(*skb, (*skb)->len - aead->authsize))
+ goto free_skb;
+
+ /* Validate TIPCv2 message */
+ if (unlikely(!tipc_msg_validate(skb))) {
+ pr_err_ratelimited("Packet dropped after decryption!\n");
+ goto free_skb;
+ }
+
+ /* Ok, everything's fine, try to synch own keys according to peers' */
+ tipc_crypto_key_synch(rx, *skb);
+
+ /* Re-fetch skb cb as skb might be changed in tipc_msg_validate */
+ skb_cb = TIPC_SKB_CB(*skb);
+
+ /* Mark skb decrypted */
+ skb_cb->decrypted = 1;
+
+ /* Clear clone cxt if any */
+ if (likely(!skb_cb->tx_clone_deferred))
+ goto exit;
+ skb_cb->tx_clone_deferred = 0;
+ memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
+ goto exit;
+
+free_skb:
+ kfree_skb(*skb);
+ *skb = NULL;
+
+exit:
+ tipc_aead_put(aead);
+ if (rx)
+ tipc_node_put(rx->node);
+}
+
+static void tipc_crypto_do_cmd(struct net *net, int cmd)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tn->crypto_tx, *rx;
+ struct list_head *p;
+ unsigned int stat;
+ int i, j, cpu;
+ char buf[200];
+
+ /* Currently only one command is supported */
+ switch (cmd) {
+ case 0xfff1:
+ goto print_stats;
+ default:
+ return;
+ }
+
+print_stats:
+ /* Print a header */
+ pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");
+
+ /* Print key status */
+ pr_info("Key status:\n");
+ pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
+ tipc_crypto_key_dump(tx, buf));
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
+ tipc_crypto_key_dump(rx, buf));
+ }
+ rcu_read_unlock();
+
+ /* Print crypto statistics */
+ for (i = 0, j = 0; i < MAX_STATS; i++)
+ j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
+ pr_info("Counter %s", buf);
+
+ memset(buf, '-', 115);
+ buf[115] = '\0';
+ pr_info("%s\n", buf);
+
+ j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+
+ rcu_read_lock();
+ for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
+ rx = tipc_node_crypto_rx_by_list(p);
+ j = scnprintf(buf, 200, "RX(%7.7s) ",
+ tipc_node_get_id_str(rx->node));
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < MAX_STATS; i++) {
+ stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
+ j += scnprintf(buf + j, 200 - j, "|%11d ",
+ stat);
+ }
+ pr_info("%s", buf);
+ j = scnprintf(buf, 200, "%12s", " ");
+ }
+ }
+ rcu_read_unlock();
+
+ pr_info("\n======================== Done ========================\n");
+}
+
+static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
+{
+ struct tipc_key key = c->key;
+ struct tipc_aead *aead;
+ int k, i = 0;
+ char *s;
+
+ for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ if (k == KEY_MASTER) {
+ if (is_rx(c))
+ continue;
+ if (time_before(jiffies,
+ c->timer2 + TIPC_TX_GRACE_PERIOD))
+ s = "ACT";
+ else
+ s = "PAS";
+ } else {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ }
+ i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
+
+ rcu_read_lock();
+ aead = rcu_dereference(c->aead[k]);
+ if (aead)
+ i += scnprintf(buf + i, 200 - i,
+ "{\"0x...%s\", \"%s\"}/%d:%d",
+ aead->hint,
+ (aead->mode == CLUSTER_KEY) ? "c" : "p",
+ atomic_read(&aead->users),
+ refcount_read(&aead->refcnt));
+ rcu_read_unlock();
+ i += scnprintf(buf + i, 200 - i, "\n");
+ }
+
+ if (is_rx(c))
+ i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
+ atomic_read(&c->peer_rx_active));
+
+ return buf;
+}
+
+static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
+ char *buf)
+{
+ struct tipc_key *key = &old;
+ int k, i = 0;
+ char *s;
+
+ /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
+again:
+ i += scnprintf(buf + i, 32 - i, "[");
+ for (k = KEY_1; k <= KEY_3; k++) {
+ if (k == key->passive)
+ s = "pas";
+ else if (k == key->active)
+ s = "act";
+ else if (k == key->pending)
+ s = "pen";
+ else
+ s = "-";
+ i += scnprintf(buf + i, 32 - i,
+ (k != KEY_3) ? "%s " : "%s", s);
+ }
+ if (key != &new) {
+ i += scnprintf(buf + i, 32 - i, "] -> ");
+ key = &new;
+ goto again;
+ }
+ i += scnprintf(buf + i, 32 - i, "]");
+ return buf;
+}
+
+/**
+ * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
+ * @net: the struct net
+ * @skb: the received message buffer
+ */
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
+{
+ struct tipc_crypto *rx;
+ struct tipc_msg *hdr;
+
+ if (unlikely(skb_linearize(skb)))
+ goto exit;
+
+ hdr = buf_msg(skb);
+ rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
+ if (unlikely(!rx))
+ goto exit;
+
+ switch (msg_type(hdr)) {
+ case KEY_DISTR_MSG:
+ if (tipc_crypto_key_rcv(rx, hdr))
+ goto exit;
+ break;
+ default:
+ break;
+ }
+
+ tipc_node_put(rx->node);
+
+exit:
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_crypto_key_distr - Distribute a TX key
+ * @tx: the TX crypto
+ * @key: the key's index
+ * @dest: the destination tipc node, = NULL if distributing to all nodes
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest)
+{
+ struct tipc_aead *aead;
+ u32 dnode = tipc_node_get_addr(dest);
+ int rc = -ENOKEY;
+
+ if (!sysctl_tipc_key_exchange_enabled)
+ return 0;
+
+ if (key) {
+ rcu_read_lock();
+ aead = tipc_aead_get(tx->aead[key]);
+ if (likely(aead)) {
+ rc = tipc_crypto_key_xmit(tx->net, aead->key,
+ aead->gen, aead->mode,
+ dnode);
+ tipc_aead_put(aead);
+ }
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_xmit - Send a session key
+ * @net: the struct net
+ * @skey: the session key to be sent
+ * @gen: the key's generation
+ * @mode: the key's mode
+ * @dnode: the destination node address, = 0 if broadcasting to all nodes
+ *
+ * The session key 'skey' is packed into a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG'
+ * as its data section, then transmitted over the unicast or broadcast link.
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode)
+{
+ struct sk_buff_head pkts;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ u16 size, cong_link_cnt;
+ u8 *data;
+ int rc;
+
+ size = tipc_aead_key_size(skey);
+ skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = buf_msg(skb);
+ tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
+ INT_H_SIZE, dnode);
+ msg_set_size(hdr, INT_H_SIZE + size);
+ msg_set_key_gen(hdr, gen);
+ msg_set_key_mode(hdr, mode);
+
+ data = msg_data(hdr);
+ *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
+ memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
+ memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
+ skey->keylen);
+
+ __skb_queue_head_init(&pkts);
+ __skb_queue_tail(&pkts, skb);
+ if (dnode)
+ rc = tipc_node_xmit(net, &pkts, dnode, 0);
+ else
+ rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
+
+ return rc;
+}
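
A userspace sketch of the data-section layout produced above, assuming
TIPC_AEAD_ALG_NAME == 32 (the constant is defined elsewhere in the patch set):
the 32-byte algorithm name comes first, followed by the big-endian key length
and the key material.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TIPC_AEAD_ALG_NAME 32	/* assumed: 32-byte algorithm name field */

/* Layout: [0..31] alg name, [32..35] big-endian keylen, [36..] key */
static size_t pack_skey(uint8_t *data, const char *alg,
			const uint8_t *key, uint32_t keylen)
{
	uint32_t be = htonl(keylen);

	memset(data, 0, TIPC_AEAD_ALG_NAME);
	memcpy(data, alg, strlen(alg));
	memcpy(data + TIPC_AEAD_ALG_NAME, &be, sizeof(be));
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(be), key, keylen);
	return TIPC_AEAD_ALG_NAME + sizeof(be) + keylen;
}

int main(void)
{
	uint8_t buf[128], key[20] = { 0 };

	printf("%zu bytes\n", pack_skey(buf, "gcm(aes)", key, sizeof(key)));
	return 0;
}
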
+
+/**
+ * tipc_crypto_key_rcv - Receive a session key
+ * @rx: the RX crypto
+ * @hdr: the TIPC v2 message, including the received session key in its data
+ *
+ * This function retrieves the session key in the message from the peer, then
+ * schedules an RX work to attach the key to the corresponding RX crypto.
+ *
+ * Return: "true" if the key has been scheduled for attaching, otherwise
+ * "false".
+ */
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+{
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_aead_key *skey = NULL;
+ u16 key_gen = msg_key_gen(hdr);
+ u32 size = msg_data_sz(hdr);
+ u8 *data = msg_data(hdr);
+ unsigned int keylen;
+
+	/* Verify that the message data can hold at least a minimum-size key */
+ if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
+ pr_debug("%s: message data size is too small\n", rx->name);
+ goto exit;
+ }
+
+ keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+
+ /* Verify the supplied size values */
+ if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX ||
+ size != keylen + sizeof(struct tipc_aead_key))) {
+ pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
+ goto exit;
+ }
+
+ spin_lock(&rx->lock);
+ if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
+ pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
+ rx->skey, key_gen, rx->key_gen);
+ goto exit_unlock;
+ }
+
+ /* Allocate memory for the key */
+ skey = kmalloc(size, GFP_ATOMIC);
+ if (unlikely(!skey)) {
+ pr_err("%s: unable to allocate memory for skey\n", rx->name);
+ goto exit_unlock;
+ }
+
+ /* Copy key from msg data */
+ skey->keylen = keylen;
+ memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
+ memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
+ skey->keylen);
+
+ rx->key_gen = key_gen;
+ rx->skey_mode = msg_key_mode(hdr);
+ rx->skey = skey;
+ rx->nokey = 0;
+ mb(); /* for nokey flag */
+
+exit_unlock:
+ spin_unlock(&rx->lock);
+
+exit:
+ /* Schedule the key attaching on this crypto */
+ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
+ return true;
+
+ return false;
+}
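
The two size checks above can be exercised in isolation; the constants below
are assumptions based on the uapi struct tipc_aead_key layout (32-byte
alg_name + 4-byte keylen + flexible key[]) and are not shown in this hunk:

#include <stdbool.h>
#include <stdio.h>

#define HDR_LEN		36	/* assumed sizeof(struct tipc_aead_key) */
#define KEYLEN_MIN	20	/* assumed: AES-128 key + salt */
#define KEY_SIZE_MAX	72	/* assumed: header + AES-256 key + salt */

static bool skey_size_ok(unsigned int size, unsigned int keylen)
{
	if (size < HDR_LEN + KEYLEN_MIN)
		return false;	/* too small to hold even a minimum key */
	if (keylen > KEY_SIZE_MAX || size != HDR_LEN + keylen)
		return false;	/* inconsistent or oversized keylen */
	return true;
}

int main(void)
{
	printf("%d\n", skey_size_ok(56, 20));	/* 1: 36 + 20 == 56 */
	printf("%d\n", skey_size_ok(56, 24));	/* 0: size mismatch */
	return 0;
}
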
+
+/**
+ * tipc_crypto_work_rx - Scheduled RX works handler
+ * @work: the struct RX work
+ *
+ * The function processes the previously scheduled work, i.e. distributing the
+ * TX key or attaching a received session key to the RX crypto.
+ */
+static void tipc_crypto_work_rx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ unsigned long delay = msecs_to_jiffies(5000);
+ bool resched = false;
+ u8 key;
+ int rc;
+
+ /* Case 1: Distribute TX key to peer if scheduled */
+ if (atomic_cmpxchg(&rx->key_distr,
+ KEY_DISTR_SCHED,
+ KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
+ /* Always pick the newest one for distributing */
+ key = tx->key.pending ?: tx->key.active;
+ rc = tipc_crypto_key_distr(tx, key, rx->node);
+ if (unlikely(rc))
+ pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
+ tx->name, key, tipc_node_get_id_str(rx->node),
+ rc);
+
+ /* Sched for key_distr releasing */
+ resched = true;
+ } else {
+ atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
+ }
+
+ /* Case 2: Attach a pending received session key from peer if any */
+ if (rx->skey) {
+ rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
+ if (unlikely(rc < 0))
+ pr_warn("%s: unable to attach received skey, err %d\n",
+ rx->name, rc);
+ switch (rc) {
+ case -EBUSY:
+ case -ENOMEM:
+ /* Resched the key attaching */
+ resched = true;
+ break;
+ default:
+ synchronize_rcu();
+ kfree(rx->skey);
+ rx->skey = NULL;
+ break;
+ }
+ }
+
+ if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
+ return;
+
+ tipc_node_put(rx->node);
+}
+
+/**
+ * tipc_crypto_rekeying_sched - (Re)schedule rekeying with/without new interval
+ * @tx: TX crypto
+ * @changed: if the rekeying needs to be rescheduled with new interval
+ * @new_intv: new rekeying interval (when "changed" = true)
+ */
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv)
+{
+ unsigned long delay;
+ bool now = false;
+
+ if (changed) {
+ if (new_intv == TIPC_REKEYING_NOW)
+ now = true;
+ else
+ tx->rekeying_intv = new_intv;
+ cancel_delayed_work_sync(&tx->work);
+ }
+
+ if (tx->rekeying_intv || now) {
+ delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
+ queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
+ }
+}
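
The interval is kept in minutes, so the delay computation above scales it by
60 * 1000 before converting to jiffies. A trivial worked example:

#include <stdio.h>

int main(void)
{
	unsigned int intv_min = 10;			/* rekey every 10 minutes */
	unsigned long delay_ms = intv_min * 60 * 1000UL;

	printf("next rekeying in %lu ms\n", delay_ms);	/* 600000 ms */
	return 0;
}
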
+
+/**
+ * tipc_crypto_work_tx - Scheduled TX works handler
+ * @work: the struct TX work
+ *
+ * The function processes the previously scheduled work, i.e. key rekeying, by
+ * generating a new session key based on the current one, then attaching it to
+ * the TX crypto and finally distributing it to the peers. It also
+ * re-schedules the rekeying if needed.
+ */
+static void tipc_crypto_work_tx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_aead_key *skey = NULL;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead;
+ int rc = -ENOMEM;
+
+ if (unlikely(key.pending))
+ goto resched;
+
+ /* Take current key as a template */
+ rcu_read_lock();
+ aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
+ if (unlikely(!aead)) {
+ rcu_read_unlock();
+ /* At least one key should exist for securing */
+ return;
+ }
+
+	/* Let's duplicate it first */
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+ rcu_read_unlock();
+
+ /* Now, generate new key, initiate & distribute it */
+ if (likely(skey)) {
+ rc = tipc_aead_key_generate(skey) ?:
+ tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
+ if (likely(rc > 0))
+ rc = tipc_crypto_key_distr(tx, rc, NULL);
+ kfree_sensitive(skey);
+ }
+
+ if (unlikely(rc))
+ pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);
+
+resched:
+ /* Re-schedule rekeying if any */
+ tipc_crypto_rekeying_sched(tx, false, 0);
+}
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
new file mode 100644
index 000000000000..ce7d4cc8a9e0
--- /dev/null
+++ b/net/tipc/crypto.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * net/tipc/crypto.h: Include file for TIPC crypto
+ *
+ * Copyright (c) 2019, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef CONFIG_TIPC_CRYPTO
+#ifndef _TIPC_CRYPTO_H
+#define _TIPC_CRYPTO_H
+
+#include "core.h"
+#include "node.h"
+#include "msg.h"
+#include "bearer.h"
+
+#define TIPC_EVERSION 7
+
+/* AEAD aes(gcm) */
+#define TIPC_AES_GCM_KEY_SIZE_128 16
+#define TIPC_AES_GCM_KEY_SIZE_192 24
+#define TIPC_AES_GCM_KEY_SIZE_256 32
+
+#define TIPC_AES_GCM_SALT_SIZE 4
+#define TIPC_AES_GCM_IV_SIZE 12
+#define TIPC_AES_GCM_TAG_SIZE 16
+
+/*
+ * TIPC crypto modes:
+ * - CLUSTER_KEY:
+ * One single key is used for both TX & RX in all nodes in the cluster.
+ * - PER_NODE_KEY:
+ *   Each node in the cluster has one TX key; for RX, a node needs to know
+ *   its peers' TX keys in order to decrypt messages from those nodes.
+ */
+enum {
+ CLUSTER_KEY = 1,
+ PER_NODE_KEY = (1 << 1),
+};
+
+extern int sysctl_tipc_max_tfms __read_mostly;
+extern int sysctl_tipc_key_exchange_enabled __read_mostly;
+
+/*
+ * TIPC encryption message format:
+ *
+ * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
+ * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w0:|Ver=7| User |D|TX |RX |K|M|N| Rsvd |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w1:| Seqno |
+ * w2:| (8 octets) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w3:\ Prevnode \
+ * / (4 or 16 octets) /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Encrypted complete TIPC V2 header and user data /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | |
+ * | AuthTag |
+ * | (16 octets) |
+ * | |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Word0:
+ * Ver : = 7 i.e. TIPC encryption message version
+ * User : = 7 (for LINK_PROTOCOL); = 13 (for LINK_CONFIG) or = 0
+ * D : The destined bit i.e. the message's destination node is
+ * "known" or not at the message encryption
+ * TX : TX key used for the message encryption
+ * RX : Currently RX active key corresponding to the destination
+ * node's TX key (when the "D" bit is set)
+ * K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * M : Bit indicates if sender has master key
+ * N : Bit indicates if sender has no RX keys corresponding to the
+ * receiver's TX (when the "D" bit is set)
+ * Rsvd    : Reserved bits/field
+ * Word1-2:
+ * Seqno : The 64-bit sequence number of the encrypted message, also
+ * part of the nonce used for the message encryption/decryption
+ * Word3-:
+ * Prevnode: The source node address, or ID in case LINK_CONFIG only
+ * AuthTag : The authentication tag for the message integrity checking
+ * generated by the message encryption
+ */
+struct tipc_ehdr {
+ union {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 destined:1,
+ user:4,
+ version:3;
+ __u8 reserved_1:1,
+ rx_nokey:1,
+ master_key:1,
+ keepalive:1,
+ rx_key_active:2,
+ tx_key:2;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:3,
+ user:4,
+ destined:1;
+ __u8 tx_key:2,
+ rx_key_active:2,
+ keepalive:1,
+ master_key:1,
+ rx_nokey:1,
+ reserved_1:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ __be16 reserved_2;
+ } __packed;
+ __be32 w0;
+ };
+ __be64 seqno;
+ union {
+ __be32 addr;
+ __u8 id[NODE_ID_LEN]; /* For a LINK_CONFIG message only! */
+ };
+#define EHDR_SIZE (offsetof(struct tipc_ehdr, addr) + sizeof(__be32))
+#define EHDR_CFG_SIZE (sizeof(struct tipc_ehdr))
+#define EHDR_MIN_SIZE (EHDR_SIZE)
+#define EHDR_MAX_SIZE (EHDR_CFG_SIZE)
+#define EMSG_OVERHEAD (EHDR_SIZE + TIPC_AES_GCM_TAG_SIZE)
+} __packed;
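
The size macros above work out to a 16-byte header for addressed messages,
28 bytes when a full 128-bit node ID is carried, and 32 bytes of total
per-message overhead once the 16-byte auth tag is included. A compile-time
check of that arithmetic, assuming NODE_ID_LEN == 16 (defined elsewhere):

#include <assert.h>

#define NODE_ID_LEN		16	/* assumed, defined elsewhere */
#define TIPC_AES_GCM_TAG_SIZE	16

static_assert(4 + 8 + 4 == 16, "EHDR_SIZE: w0 + seqno + addr");
static_assert(4 + 8 + NODE_ID_LEN == 28, "EHDR_CFG_SIZE: w0 + seqno + node id");
static_assert(16 + TIPC_AES_GCM_TAG_SIZE == 32, "EMSG_OVERHEAD: ehdr + tag");

int main(void) { return 0; }
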
+
+int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
+ struct tipc_node *node);
+void tipc_crypto_stop(struct tipc_crypto **crypto);
+void tipc_crypto_timeout(struct tipc_crypto *rx);
+int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
+ struct tipc_bearer *b, struct tipc_media_addr *dst,
+ struct tipc_node *__dnode);
+int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
+ struct sk_buff **skb, struct tipc_bearer *b);
+int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
+ u8 mode, bool master_key);
+void tipc_crypto_key_flush(struct tipc_crypto *c);
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest);
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb);
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info);
+bool tipc_ehdr_validate(struct sk_buff *skb);
+
+static inline u32 msg_key_gen(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_key_gen(struct tipc_msg *m, u32 gen)
+{
+ msg_set_bits(m, 4, 16, 0xffff, gen);
+}
+
+static inline u32 msg_key_mode(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xf);
+}
+
+static inline void msg_set_key_mode(struct tipc_msg *m, u32 mode)
+{
+ msg_set_bits(m, 4, 0, 0xf, mode);
+}
+
+#endif /* _TIPC_CRYPTO_H */
+#endif
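
The accessors above assume msg_bits()/msg_set_bits() from msg.h, which extract
or insert a masked field at a bit position within a header word. A host-endian
userspace model of how word 4 carries both fields:

#include <stdint.h>
#include <stdio.h>

static uint32_t msg_bits(uint32_t w, unsigned int pos, uint32_t mask)
{
	return (w >> pos) & mask;
}

static uint32_t msg_set_bits(uint32_t w, unsigned int pos, uint32_t mask,
			     uint32_t val)
{
	w &= ~(mask << pos);
	return w | ((val & mask) << pos);
}

int main(void)
{
	uint32_t w4 = 0;

	w4 = msg_set_bits(w4, 16, 0xffff, 0xabcd);	/* key generation */
	w4 = msg_set_bits(w4, 0, 0xf, 2);		/* key mode */
	printf("gen %#x, mode %u\n",
	       (unsigned int)msg_bits(w4, 16, 0xffff),
	       (unsigned int)msg_bits(w4, 0, 0xf));
	return 0;
}
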
diff --git a/net/tipc/diag.c b/net/tipc/diag.c
new file mode 100644
index 000000000000..54dde8c4e4d4
--- /dev/null
+++ b/net/tipc/diag.c
@@ -0,0 +1,118 @@
+/*
+ * net/tipc/diag.c: TIPC socket diag
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "socket.h"
+#include <linux/sock_diag.h>
+#include <linux/tipc_sockets_diag.h>
+
+static u64 __tipc_diag_gen_cookie(struct sock *sk)
+{
+ u32 res[2];
+
+ sock_diag_save_cookie(sk, res);
+ return *((u64 *)res);
+}
+
+static int __tipc_add_sock_diag(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk)
+{
+ struct tipc_sock_diag_req *req = nlmsg_data(cb->nlh);
+ struct nlmsghdr *nlh;
+ int err;
+
+ nlh = nlmsg_put_answer(skb, cb, SOCK_DIAG_BY_FAMILY, 0,
+ NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
+ __tipc_diag_gen_cookie);
+ if (err)
+ return err;
+
+ nlmsg_end(skb, nlh);
+ return 0;
+}
+
+static int tipc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return tipc_nl_sk_walk(skb, cb, __tipc_add_sock_diag);
+}
+
+static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
+ struct nlmsghdr *h)
+{
+ int hdrlen = sizeof(struct tipc_sock_diag_req);
+ struct net *net = sock_net(skb->sk);
+
+ if (nlmsg_len(h) < hdrlen)
+ return -EINVAL;
+
+ if (h->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .start = tipc_dump_start,
+ .dump = tipc_diag_dump,
+ .done = tipc_dump_done,
+ };
+ netlink_dump_start(net->diag_nlsk, skb, h, &c);
+ return 0;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler tipc_sock_diag_handler = {
+ .owner = THIS_MODULE,
+ .family = AF_TIPC,
+ .dump = tipc_sock_diag_handler_dump,
+};
+
+static int __init tipc_diag_init(void)
+{
+ return sock_diag_register(&tipc_sock_diag_handler);
+}
+
+static void __exit tipc_diag_exit(void)
+{
+ sock_diag_unregister(&tipc_sock_diag_handler);
+}
+
+module_init(tipc_diag_init);
+module_exit(tipc_diag_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("TIPC socket monitoring via SOCK_DIAG");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_TIPC);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 02462d67d191..775fd4f3f072 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
/*
* net/tipc/discover.c
*
- * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2003-2006, 2014-2018, Ericsson AB
* Copyright (c) 2005-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -39,34 +39,34 @@
#include "discover.h"
/* min delay during bearer start up */
-#define TIPC_LINK_REQ_INIT msecs_to_jiffies(125)
+#define TIPC_DISC_INIT msecs_to_jiffies(125)
/* max delay if bearer has no links */
-#define TIPC_LINK_REQ_FAST msecs_to_jiffies(1000)
+#define TIPC_DISC_FAST msecs_to_jiffies(1000)
/* max delay if bearer has links */
-#define TIPC_LINK_REQ_SLOW msecs_to_jiffies(60000)
+#define TIPC_DISC_SLOW msecs_to_jiffies(60000)
/* indicates no timer in use */
-#define TIPC_LINK_REQ_INACTIVE 0xffffffff
+#define TIPC_DISC_INACTIVE 0xffffffff
/**
- * struct tipc_link_req - information about an ongoing link setup request
+ * struct tipc_discoverer - information about an ongoing link setup request
* @bearer_id: identity of bearer issuing requests
* @net: network namespace instance
* @dest: destination address for request messages
* @domain: network domain to which links can be established
* @num_nodes: number of nodes currently discovered (i.e. with an active link)
* @lock: spinlock for controlling access to requests
- * @buf: request message to be (repeatedly) sent
+ * @skb: request message to be (repeatedly) sent
* @timer: timer governing period between requests
* @timer_intv: current interval between requests (in ms)
*/
-struct tipc_link_req {
+struct tipc_discoverer {
u32 bearer_id;
struct tipc_media_addr dest;
struct net *net;
u32 domain;
int num_nodes;
spinlock_t lock;
- struct sk_buff *buf;
+ struct sk_buff *skb;
struct timer_list timer;
unsigned long timer_intv;
};
@@ -74,25 +74,47 @@ struct tipc_link_req {
/**
* tipc_disc_init_msg - initialize a link setup message
* @net: the applicable net namespace
- * @type: message type (request or response)
+ * @skb: buffer containing message
+ * @mtyp: message type (request or response)
* @b: ptr to bearer issuing message
*/
-static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
- struct tipc_bearer *b)
+static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
+ u32 mtyp, struct tipc_bearer *b)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_msg *msg;
+ struct tipc_net *tn = tipc_net(net);
u32 dest_domain = b->domain;
+ struct tipc_msg *hdr;
- msg = buf_msg(buf);
- tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
+ hdr = buf_msg(skb);
+ tipc_msg_init(tn->trial_addr, hdr, LINK_CONFIG, mtyp,
MAX_H_SIZE, dest_domain);
- msg_set_non_seq(msg, 1);
- msg_set_node_sig(msg, tn->random);
- msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
- msg_set_dest_domain(msg, dest_domain);
- msg_set_bc_netid(msg, tn->net_id);
- b->media->addr2msg(msg_media_addr(msg), &b->addr);
+ msg_set_size(hdr, MAX_H_SIZE + NODE_ID_LEN);
+ msg_set_non_seq(hdr, 1);
+ msg_set_node_sig(hdr, tn->random);
+ msg_set_node_capabilities(hdr, TIPC_NODE_CAPABILITIES);
+ msg_set_dest_domain(hdr, dest_domain);
+ msg_set_bc_netid(hdr, tn->net_id);
+ b->media->addr2msg(msg_media_addr(hdr), &b->addr);
+ msg_set_peer_net_hash(hdr, tipc_net_hash_mixes(net, tn->random));
+ msg_set_node_id(hdr, tipc_own_id(net));
+}
+
+static void tipc_disc_msg_xmit(struct net *net, u32 mtyp, u32 dst,
+ u32 src, u32 sugg_addr,
+ struct tipc_media_addr *maddr,
+ struct tipc_bearer *b)
+{
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+
+ skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
+ if (!skb)
+ return;
+ hdr = buf_msg(skb);
+ tipc_disc_init_msg(net, skb, mtyp, b);
+ msg_set_sugg_node_addr(hdr, sugg_addr);
+ msg_set_dest_domain(hdr, dst);
+ tipc_bearer_xmit_skb(net, b->identity, skb, maddr);
}
/**
@@ -104,161 +126,216 @@ static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
struct tipc_media_addr *media_addr)
{
- char node_addr_str[16];
char media_addr_str[64];
- tipc_addr_string_fill(node_addr_str, node_addr);
tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
media_addr);
- pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
+ pr_warn("Duplicate %x using %s seen on <%s>\n", node_addr,
media_addr_str, b->name);
}
+/* tipc_disc_addr_trial_msg - handle an address uniqueness trial from peer
+ * Returns true if the message should be dropped by the caller, i.e., if it
+ * is a trial message or we are inside the trial period. Otherwise false.
+ */
+static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
+ struct tipc_media_addr *maddr,
+ struct tipc_bearer *b,
+ u32 dst, u32 src,
+ u32 sugg_addr,
+ u8 *peer_id,
+ int mtyp)
+{
+ struct net *net = d->net;
+ struct tipc_net *tn = tipc_net(net);
+ u32 self = tipc_own_addr(net);
+ bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
+
+ if (mtyp == DSC_TRIAL_FAIL_MSG) {
+ if (!trial)
+ return true;
+
+ /* Ignore if somebody else already gave new suggestion */
+ if (dst != tn->trial_addr)
+ return true;
+
+ /* Otherwise update trial address and restart trial period */
+ tn->trial_addr = sugg_addr;
+ msg_set_prevnode(buf_msg(d->skb), sugg_addr);
+ tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
+ return true;
+ }
+
+ /* Apply trial address if we just left trial period */
+ if (!trial && !self) {
+ schedule_work(&tn->work);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ }
+
+ /* Accept regular link requests/responses only after trial period */
+ if (mtyp != DSC_TRIAL_MSG)
+ return trial;
+
+ sugg_addr = tipc_node_try_addr(net, peer_id, src);
+ if (sugg_addr)
+ tipc_disc_msg_xmit(net, DSC_TRIAL_FAIL_MSG, src,
+ self, sugg_addr, maddr, b);
+ return true;
+}
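
A userspace model of the drop/accept decision above; the DSC_* values here are
illustrative placeholders, not the real msg.h constants:

#include <stdbool.h>
#include <stdio.h>

enum { DSC_REQ_MSG, DSC_RESP_MSG, DSC_TRIAL_MSG, DSC_TRIAL_FAIL_MSG };

static bool drop_msg(int mtyp, bool in_trial_period)
{
	if (mtyp == DSC_TRIAL_FAIL_MSG)
		return true;		/* suggestion consumed, then dropped */
	if (mtyp == DSC_TRIAL_MSG)
		return true;		/* answered (or ignored), then dropped */
	return in_trial_period;		/* regular msgs wait out the trial */
}

int main(void)
{
	printf("%d\n", drop_msg(DSC_REQ_MSG, true));	/* 1: dropped */
	printf("%d\n", drop_msg(DSC_REQ_MSG, false));	/* 0: processed */
	return 0;
}
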
+
/**
* tipc_disc_rcv - handle incoming discovery message (request or response)
- * @net: the applicable net namespace
- * @buf: buffer containing message
- * @bearer: bearer that message arrived on
+ * @net: applicable net namespace
+ * @skb: buffer containing message
+ * @b: bearer that message arrived on
*/
void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
- struct tipc_bearer *bearer)
+ struct tipc_bearer *b)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_media_addr maddr;
- struct sk_buff *rskb;
+ struct tipc_net *tn = tipc_net(net);
struct tipc_msg *hdr = buf_msg(skb);
- u32 ddom = msg_dest_domain(hdr);
- u32 onode = msg_prevnode(hdr);
+ u32 pnet_hash = msg_peer_net_hash(hdr);
+ u16 caps = msg_node_capabilities(hdr);
+ bool legacy = tn->legacy_addr_format;
+ u32 sugg = msg_sugg_node_addr(hdr);
+ u32 signature = msg_node_sig(hdr);
+ u8 peer_id[NODE_ID_LEN] = {0,};
+ u32 dst = msg_dest_domain(hdr);
u32 net_id = msg_bc_netid(hdr);
+ struct tipc_media_addr maddr;
+ u32 src = msg_prevnode(hdr);
u32 mtyp = msg_type(hdr);
- u32 signature = msg_node_sig(hdr);
- u16 caps = msg_node_capabilities(hdr);
- bool respond = false;
bool dupl_addr = false;
+ bool respond = false;
+ u32 self;
int err;
- err = bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
- kfree_skb(skb);
- if (err)
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
return;
+ }
+ hdr = buf_msg(skb);
- /* Ensure message from node is valid and communication is permitted */
- if (net_id != tn->net_id)
+ if (caps & TIPC_NODE_ID128)
+ memcpy(peer_id, msg_node_id(hdr), NODE_ID_LEN);
+ else
+ sprintf(peer_id, "%x", src);
+
+ err = b->media->msg2addr(b, &maddr, msg_media_addr(hdr));
+ kfree_skb(skb);
+ if (err || maddr.broadcast) {
+ pr_warn_ratelimited("Rcv corrupt discovery message\n");
return;
- if (maddr.broadcast)
+ }
+ /* Ignore discovery messages from own node */
+ if (!memcmp(&maddr, &b->addr, sizeof(maddr)))
return;
- if (!tipc_addr_domain_valid(ddom))
+ if (net_id != tn->net_id)
return;
- if (!tipc_addr_node_valid(onode))
+ if (tipc_disc_addr_trial_msg(b->disc, &maddr, b, dst,
+ src, sugg, peer_id, mtyp))
return;
+ self = tipc_own_addr(net);
- if (in_own_node(net, onode)) {
- if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
- disc_dupl_alert(bearer, tn->own_addr, &maddr);
+ /* Message from somebody using this node's address */
+ if (in_own_node(net, src)) {
+ disc_dupl_alert(b, self, &maddr);
return;
}
- if (!tipc_in_scope(ddom, tn->own_addr))
+ if (!tipc_in_scope(legacy, dst, self))
return;
- if (!tipc_in_scope(bearer->domain, onode))
+ if (!tipc_in_scope(legacy, b->domain, src))
return;
-
- tipc_node_check_dest(net, onode, bearer, caps, signature,
+ tipc_node_check_dest(net, src, peer_id, b, caps, signature, pnet_hash,
&maddr, &respond, &dupl_addr);
if (dupl_addr)
- disc_dupl_alert(bearer, onode, &maddr);
-
- /* Send response, if necessary */
- if (respond && (mtyp == DSC_REQ_MSG)) {
- rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
- if (!rskb)
- return;
- tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
- tipc_bearer_xmit_skb(net, bearer->identity, rskb, &maddr);
- }
+ disc_dupl_alert(b, src, &maddr);
+ if (!respond)
+ return;
+ if (mtyp != DSC_REQ_MSG)
+ return;
+ tipc_disc_msg_xmit(net, DSC_RESP_MSG, src, self, 0, &maddr, b);
}
-/**
- * disc_update - update frequency of periodic link setup requests
- * @req: ptr to link request structure
- *
- * Reinitiates discovery process if discovery object has no associated nodes
- * and is either not currently searching or is searching at a slow rate
+/* tipc_disc_add_dest - increment set of discovered nodes
*/
-static void disc_update(struct tipc_link_req *req)
+void tipc_disc_add_dest(struct tipc_discoverer *d)
{
- if (!req->num_nodes) {
- if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
- (req->timer_intv > TIPC_LINK_REQ_FAST)) {
- req->timer_intv = TIPC_LINK_REQ_INIT;
- mod_timer(&req->timer, jiffies + req->timer_intv);
- }
- }
+ spin_lock_bh(&d->lock);
+ d->num_nodes++;
+ spin_unlock_bh(&d->lock);
}
-/**
- * tipc_disc_add_dest - increment set of discovered nodes
- * @req: ptr to link request structure
+/* tipc_disc_remove_dest - decrement set of discovered nodes
*/
-void tipc_disc_add_dest(struct tipc_link_req *req)
+void tipc_disc_remove_dest(struct tipc_discoverer *d)
{
- spin_lock_bh(&req->lock);
- req->num_nodes++;
- spin_unlock_bh(&req->lock);
-}
+ int intv, num;
-/**
- * tipc_disc_remove_dest - decrement set of discovered nodes
- * @req: ptr to link request structure
- */
-void tipc_disc_remove_dest(struct tipc_link_req *req)
-{
- spin_lock_bh(&req->lock);
- req->num_nodes--;
- disc_update(req);
- spin_unlock_bh(&req->lock);
+ spin_lock_bh(&d->lock);
+ d->num_nodes--;
+ num = d->num_nodes;
+ intv = d->timer_intv;
+ if (!num && (intv == TIPC_DISC_INACTIVE || intv > TIPC_DISC_FAST)) {
+ d->timer_intv = TIPC_DISC_INIT;
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ }
+ spin_unlock_bh(&d->lock);
}
-/**
- * disc_timeout - send a periodic link setup request
- * @data: ptr to link request structure
- *
+/* tipc_disc_timeout - send a periodic link setup request
* Called whenever a link setup request timer associated with a bearer expires.
+ * - Keep doubling the time between sent requests until the limit is reached;
+ * - Hold at fast polling rate if we don't have any associated nodes
+ * - Otherwise hold at slow polling rate
*/
-static void disc_timeout(unsigned long data)
+static void tipc_disc_timeout(struct timer_list *t)
{
- struct tipc_link_req *req = (struct tipc_link_req *)data;
- struct sk_buff *skb;
- int max_delay;
+ struct tipc_discoverer *d = timer_container_of(d, t, timer);
+ struct tipc_net *tn = tipc_net(d->net);
+ struct tipc_media_addr maddr;
+ struct sk_buff *skb = NULL;
+ struct net *net = d->net;
+ u32 bearer_id;
- spin_lock_bh(&req->lock);
+ spin_lock_bh(&d->lock);
/* Stop searching if only desired node has been found */
- if (tipc_node(req->domain) && req->num_nodes) {
- req->timer_intv = TIPC_LINK_REQ_INACTIVE;
+ if (tipc_node(d->domain) && d->num_nodes) {
+ d->timer_intv = TIPC_DISC_INACTIVE;
goto exit;
}
- /*
- * Send discovery message, then update discovery timer
- *
- * Keep doubling time between requests until limit is reached;
- * hold at fast polling rate if don't have any associated nodes,
- * otherwise hold at slow polling rate
- */
- skb = skb_clone(req->buf, GFP_ATOMIC);
- if (skb)
- tipc_bearer_xmit_skb(req->net, req->bearer_id, skb, &req->dest);
- req->timer_intv *= 2;
- if (req->num_nodes)
- max_delay = TIPC_LINK_REQ_SLOW;
- else
- max_delay = TIPC_LINK_REQ_FAST;
- if (req->timer_intv > max_delay)
- req->timer_intv = max_delay;
+	/* Did we just leave the trial period? */
+ if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+ mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+ spin_unlock_bh(&d->lock);
+ schedule_work(&tn->work);
+ return;
+ }
+
+ /* Adjust timeout interval according to discovery phase */
+ if (time_before(jiffies, tn->addr_trial_end)) {
+ d->timer_intv = TIPC_DISC_INIT;
+ } else {
+ d->timer_intv *= 2;
+ if (d->num_nodes && d->timer_intv > TIPC_DISC_SLOW)
+ d->timer_intv = TIPC_DISC_SLOW;
+ else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
+ d->timer_intv = TIPC_DISC_FAST;
+ msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
+ }
- mod_timer(&req->timer, jiffies + req->timer_intv);
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ memcpy(&maddr, &d->dest, sizeof(maddr));
+ skb = skb_clone(d->skb, GFP_ATOMIC);
+ bearer_id = d->bearer_id;
exit:
- spin_unlock_bh(&req->lock);
+ spin_unlock_bh(&d->lock);
+ if (skb)
+ tipc_bearer_xmit_skb(net, bearer_id, skb, &maddr);
}
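
The backoff schedule this produces is easy to check in isolation. Below is a minimal userspace sketch, with made-up millisecond values standing in for TIPC_DISC_INIT/TIPC_DISC_FAST/TIPC_DISC_SLOW (the real constants live in discover.c):

	#include <stdio.h>

	#define DISC_INIT 125    /* assumed initial interval, ms */
	#define DISC_FAST 1000   /* assumed fast-poll ceiling, ms */
	#define DISC_SLOW 60000  /* assumed slow-poll ceiling, ms */

	/* mirror of the interval update in tipc_disc_timeout() */
	static int next_intv(int intv, int num_nodes)
	{
		intv *= 2;
		if (num_nodes && intv > DISC_SLOW)
			intv = DISC_SLOW;
		else if (!num_nodes && intv > DISC_FAST)
			intv = DISC_FAST;
		return intv;
	}

	int main(void)
	{
		int intv = DISC_INIT;
		int i;

		for (i = 0; i < 6; i++) {
			printf("tick %d: %d ms\n", i, intv);
			intv = next_intv(intv, 0);  /* no nodes discovered yet */
		}
		return 0;
	}

With no discovered nodes the interval doubles 125 -> 250 -> 500 -> 1000 ms and then holds at the fast rate.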
/**
@@ -266,71 +343,78 @@ exit:
* @net: the applicable net namespace
* @b: ptr to bearer issuing requests
* @dest: destination address for request messages
- * @dest_domain: network domain to which links can be established
+ * @skb: pointer to created frame
*
- * Returns 0 if successful, otherwise -errno.
+ * Return: 0 if successful, otherwise -errno.
*/
int tipc_disc_create(struct net *net, struct tipc_bearer *b,
struct tipc_media_addr *dest, struct sk_buff **skb)
{
- struct tipc_link_req *req;
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_discoverer *d;
- req = kmalloc(sizeof(*req), GFP_ATOMIC);
- if (!req)
+ d = kmalloc(sizeof(*d), GFP_ATOMIC);
+ if (!d)
return -ENOMEM;
- req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
- if (!req->buf) {
- kfree(req);
+ d->skb = tipc_buf_acquire(MAX_H_SIZE + NODE_ID_LEN, GFP_ATOMIC);
+ if (!d->skb) {
+ kfree(d);
return -ENOMEM;
}
+ tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
- tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b);
- memcpy(&req->dest, dest, sizeof(*dest));
- req->net = net;
- req->bearer_id = b->identity;
- req->domain = b->domain;
- req->num_nodes = 0;
- req->timer_intv = TIPC_LINK_REQ_INIT;
- spin_lock_init(&req->lock);
- setup_timer(&req->timer, disc_timeout, (unsigned long)req);
- mod_timer(&req->timer, jiffies + req->timer_intv);
- b->link_req = req;
- *skb = skb_clone(req->buf, GFP_ATOMIC);
+	/* Do we need an address trial period first? */
+ if (!tipc_own_addr(net)) {
+ tn->addr_trial_end = jiffies + msecs_to_jiffies(1000);
+ msg_set_type(buf_msg(d->skb), DSC_TRIAL_MSG);
+ }
+ memcpy(&d->dest, dest, sizeof(*dest));
+ d->net = net;
+ d->bearer_id = b->identity;
+ d->domain = b->domain;
+ d->num_nodes = 0;
+ d->timer_intv = TIPC_DISC_INIT;
+ spin_lock_init(&d->lock);
+ timer_setup(&d->timer, tipc_disc_timeout, 0);
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ b->disc = d;
+ *skb = skb_clone(d->skb, GFP_ATOMIC);
return 0;
}
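
The hunk above also converts the discoverer from setup_timer() with an unsigned-long cookie to timer_setup(), where the callback recovers its container from the timer_list pointer. A minimal sketch of that pattern follows; struct my_obj and my_timeout are hypothetical names, not TIPC symbols:

	struct my_obj {
		struct timer_list timer;
		/* ... object state ... */
	};

	static void my_timeout(struct timer_list *t)
	{
		/* recover the enclosing object from the embedded timer */
		struct my_obj *o = timer_container_of(o, t, timer);

		/* ... periodic work on o ... */
		mod_timer(&o->timer, jiffies + HZ);
	}

	/* at init time:
	 *	timer_setup(&o->timer, my_timeout, 0);
	 *	mod_timer(&o->timer, jiffies + HZ);
	 */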
/**
* tipc_disc_delete - destroy object sending periodic link setup requests
- * @req: ptr to link request structure
+ * @d: ptr to link discoverer structure
*/
-void tipc_disc_delete(struct tipc_link_req *req)
+void tipc_disc_delete(struct tipc_discoverer *d)
{
- del_timer_sync(&req->timer);
- kfree_skb(req->buf);
- kfree(req);
+ timer_shutdown_sync(&d->timer);
+ kfree_skb(d->skb);
+ kfree(d);
}
/**
* tipc_disc_reset - reset object to send periodic link setup requests
* @net: the applicable net namespace
* @b: ptr to bearer issuing requests
- * @dest_domain: network domain to which links can be established
*/
void tipc_disc_reset(struct net *net, struct tipc_bearer *b)
{
- struct tipc_link_req *req = b->link_req;
+ struct tipc_discoverer *d = b->disc;
+ struct tipc_media_addr maddr;
struct sk_buff *skb;
- spin_lock_bh(&req->lock);
- tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b);
- req->net = net;
- req->bearer_id = b->identity;
- req->domain = b->domain;
- req->num_nodes = 0;
- req->timer_intv = TIPC_LINK_REQ_INIT;
- mod_timer(&req->timer, jiffies + req->timer_intv);
- skb = skb_clone(req->buf, GFP_ATOMIC);
+ spin_lock_bh(&d->lock);
+ tipc_disc_init_msg(net, d->skb, DSC_REQ_MSG, b);
+ d->net = net;
+ d->bearer_id = b->identity;
+ d->domain = b->domain;
+ d->num_nodes = 0;
+ d->timer_intv = TIPC_DISC_INIT;
+ memcpy(&maddr, &d->dest, sizeof(maddr));
+ mod_timer(&d->timer, jiffies + d->timer_intv);
+ skb = skb_clone(d->skb, GFP_ATOMIC);
+ spin_unlock_bh(&d->lock);
if (skb)
- tipc_bearer_xmit_skb(net, req->bearer_id, skb, &req->dest);
- spin_unlock_bh(&req->lock);
+ tipc_bearer_xmit_skb(net, b->identity, skb, &maddr);
}
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index b80a335389c0..521d96c41dfd 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,14 +37,14 @@
#ifndef _TIPC_DISCOVER_H
#define _TIPC_DISCOVER_H
-struct tipc_link_req;
+struct tipc_discoverer;
int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
struct tipc_media_addr *dest, struct sk_buff **skb);
-void tipc_disc_delete(struct tipc_link_req *req);
+void tipc_disc_delete(struct tipc_discoverer *d);
void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
-void tipc_disc_add_dest(struct tipc_link_req *req);
-void tipc_disc_remove_dest(struct tipc_link_req *req);
+void tipc_disc_add_dest(struct tipc_discoverer *d);
+void tipc_disc_remove_dest(struct tipc_discoverer *d);
void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
struct tipc_bearer *b_ptr);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index f69a2fde9f4a..cb0d185e06af 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -60,14 +60,12 @@ static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
/* Convert raw mac address format to media addr format */
static int tipc_eth_raw2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *msg)
+ const char *msg)
{
- char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
memset(addr, 0, sizeof(*addr));
ether_addr_copy(addr->value, msg);
addr->media_id = TIPC_MEDIA_TYPE_ETH;
- addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
+ addr->broadcast = is_broadcast_ether_addr(addr->value);
return 0;
}
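
is_broadcast_ether_addr() from <linux/etherdevice.h> replaces the open-coded memcmp() against a stack array of 0xff bytes. The two are equivalent; for comparison, a sketch of what was removed:

	/* equivalent of the removed code, shown only for comparison */
	static bool bcast_by_memcmp(const u8 *a)
	{
		static const u8 bc[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

		return memcmp(a, bc, ETH_ALEN) == 0;
	}

	/* is_broadcast_ether_addr(a) tests that all six octets are 0xff,
	 * without needing a local buffer.
	 */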
@@ -92,7 +90,8 @@ struct tipc_media eth_media_info = {
.raw2addr = tipc_eth_raw2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
- .window = TIPC_DEF_LINK_WIN,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_MAX_LINK_WIN,
.type_id = TIPC_MEDIA_TYPE_ETH,
.hwaddr_len = ETH_ALEN,
.name = "eth"
diff --git a/net/tipc/group.c b/net/tipc/group.c
new file mode 100644
index 000000000000..3e137d8c9d2f
--- /dev/null
+++ b/net/tipc/group.c
@@ -0,0 +1,959 @@
+/*
+ * net/tipc/group.c: TIPC group messaging code
+ *
+ * Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "addr.h"
+#include "group.h"
+#include "bcast.h"
+#include "topsrv.h"
+#include "msg.h"
+#include "socket.h"
+#include "node.h"
+#include "name_table.h"
+#include "subscr.h"
+
+#define ADV_UNIT (((MAX_MSG_SIZE + MAX_H_SIZE) / FLOWCTL_BLK_SZ) + 1)
+#define ADV_IDLE ADV_UNIT
+#define ADV_ACTIVE (ADV_UNIT * 12)
+
+enum mbr_state {
+ MBR_JOINING,
+ MBR_PUBLISHED,
+ MBR_JOINED,
+ MBR_PENDING,
+ MBR_ACTIVE,
+ MBR_RECLAIMING,
+ MBR_REMITTED,
+ MBR_LEAVING
+};
+
+struct tipc_member {
+ struct rb_node tree_node;
+ struct list_head list;
+ struct list_head small_win;
+ struct sk_buff_head deferredq;
+ struct tipc_group *group;
+ u32 node;
+ u32 port;
+ u32 instance;
+ enum mbr_state state;
+ u16 advertised;
+ u16 window;
+ u16 bc_rcv_nxt;
+ u16 bc_syncpt;
+ u16 bc_acked;
+};
+
+struct tipc_group {
+ struct rb_root members;
+ struct list_head small_win;
+ struct list_head pending;
+ struct list_head active;
+ struct tipc_nlist dests;
+ struct net *net;
+ int subid;
+ u32 type;
+ u32 instance;
+ u32 scope;
+ u32 portid;
+ u16 member_cnt;
+ u16 active_cnt;
+ u16 max_active;
+ u16 bc_snd_nxt;
+ u16 bc_ackers;
+ bool *open;
+ bool loopback;
+ bool events;
+};
+
+static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
+ int mtyp, struct sk_buff_head *xmitq);
+
+static void tipc_group_open(struct tipc_member *m, bool *wakeup)
+{
+ *wakeup = false;
+ if (list_empty(&m->small_win))
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
+ *wakeup = true;
+}
+
+static void tipc_group_decr_active(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ if (m->state == MBR_ACTIVE || m->state == MBR_RECLAIMING ||
+ m->state == MBR_REMITTED)
+ grp->active_cnt--;
+}
+
+static int tipc_group_rcvbuf_limit(struct tipc_group *grp)
+{
+ int max_active, active_pool, idle_pool;
+ int mcnt = grp->member_cnt + 1;
+
+ /* Limit simultaneous reception from other members */
+ max_active = min(mcnt / 8, 64);
+ max_active = max(max_active, 16);
+ grp->max_active = max_active;
+
+ /* Reserve blocks for active and idle members */
+ active_pool = max_active * ADV_ACTIVE;
+ idle_pool = (mcnt - max_active) * ADV_IDLE;
+
+ /* Scale to bytes, considering worst-case truesize/msgsize ratio */
+ return (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4;
+}
+
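A worked check of the limit, with assumed constants (ADV_UNIT derives from MAX_MSG_SIZE/MAX_H_SIZE in msg.h, so treat the numbers as illustrative only):

	#include <stdio.h>

	#define FLOWCTL_BLK_SZ	1024		/* assumed */
	#define ADV_UNIT	66		/* assumed */
	#define ADV_IDLE	ADV_UNIT
	#define ADV_ACTIVE	(ADV_UNIT * 12)

	int main(void)
	{
		int mcnt = 200 + 1;		/* 200 members plus self */
		int max_active = mcnt / 8;	/* min(mcnt / 8, 64) -> 25 */

		if (max_active > 64)
			max_active = 64;
		if (max_active < 16)
			max_active = 16;

		long active_pool = (long)max_active * ADV_ACTIVE;
		long idle_pool = (long)(mcnt - max_active) * ADV_IDLE;

		printf("rcvbuf limit: %ld bytes\n",
		       (active_pool + idle_pool) * FLOWCTL_BLK_SZ * 4);
		return 0;
	}

With these assumed values a 200-member group gets a receive buffer limit of roughly 123 MiB.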
+u16 tipc_group_bc_snd_nxt(struct tipc_group *grp)
+{
+ return grp->bc_snd_nxt;
+}
+
+static bool tipc_group_is_receiver(struct tipc_member *m)
+{
+ return m && m->state != MBR_JOINING && m->state != MBR_LEAVING;
+}
+
+static bool tipc_group_is_sender(struct tipc_member *m)
+{
+ return m && m->state != MBR_JOINING && m->state != MBR_PUBLISHED;
+}
+
+u32 tipc_group_exclude(struct tipc_group *grp)
+{
+ if (!grp->loopback)
+ return grp->portid;
+ return 0;
+}
+
+struct tipc_group *tipc_group_create(struct net *net, u32 portid,
+ struct tipc_group_req *mreq,
+ bool *group_is_open)
+{
+ u32 filter = TIPC_SUB_PORTS | TIPC_SUB_NO_STATUS;
+ bool global = mreq->scope != TIPC_NODE_SCOPE;
+ struct tipc_group *grp;
+ u32 type = mreq->type;
+
+ grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
+ if (!grp)
+ return NULL;
+ tipc_nlist_init(&grp->dests, tipc_own_addr(net));
+ INIT_LIST_HEAD(&grp->small_win);
+ INIT_LIST_HEAD(&grp->active);
+ INIT_LIST_HEAD(&grp->pending);
+ grp->members = RB_ROOT;
+ grp->net = net;
+ grp->portid = portid;
+ grp->type = type;
+ grp->instance = mreq->instance;
+ grp->scope = mreq->scope;
+ grp->loopback = mreq->flags & TIPC_GROUP_LOOPBACK;
+ grp->events = mreq->flags & TIPC_GROUP_MEMBER_EVTS;
+ grp->open = group_is_open;
+ *grp->open = false;
+ filter |= global ? TIPC_SUB_CLUSTER_SCOPE : TIPC_SUB_NODE_SCOPE;
+ if (tipc_topsrv_kern_subscr(net, portid, type, 0, ~0,
+ filter, &grp->subid))
+ return grp;
+ kfree(grp);
+ return NULL;
+}
+
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcvbuf)
+{
+ struct rb_root *tree = &grp->members;
+ struct tipc_member *m, *tmp;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, &xmitq);
+ tipc_group_update_member(m, 0);
+ }
+ tipc_node_distr_xmit(net, &xmitq);
+ *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
+void tipc_group_delete(struct net *net, struct tipc_group *grp)
+{
+ struct rb_root *tree = &grp->members;
+ struct tipc_member *m, *tmp;
+ struct sk_buff_head xmitq;
+
+ __skb_queue_head_init(&xmitq);
+
+ rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
+ tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
+ __skb_queue_purge(&m->deferredq);
+ list_del(&m->list);
+ kfree(m);
+ }
+ tipc_node_distr_xmit(net, &xmitq);
+ tipc_nlist_purge(&grp->dests);
+ tipc_topsrv_kern_unsubscr(net, grp->subid);
+ kfree(grp);
+}
+
+static struct tipc_member *tipc_group_find_member(struct tipc_group *grp,
+ u32 node, u32 port)
+{
+ struct rb_node *n = grp->members.rb_node;
+ u64 nkey, key = (u64)node << 32 | port;
+ struct tipc_member *m;
+
+ while (n) {
+ m = container_of(n, struct tipc_member, tree_node);
+ nkey = (u64)m->node << 32 | m->port;
+ if (key < nkey)
+ n = n->rb_left;
+ else if (key > nkey)
+ n = n->rb_right;
+ else
+ return m;
+ }
+ return NULL;
+}
+
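Members are ordered in the rbtree by a synthetic 64-bit key, so lookup is a plain binary search over (node, port) pairs: the node id dominates and the port breaks ties within one node. A sketch of the key construction, mirroring the code above:

	static inline u64 member_key(u32 node, u32 port)
	{
		return (u64)node << 32 | port;
	}

	/* member_key(n1, p) < member_key(n2, q) whenever n1 < n2,
	 * regardless of the ports, so all members of one node form a
	 * contiguous range in the tree.
	 */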
+static struct tipc_member *tipc_group_find_dest(struct tipc_group *grp,
+ u32 node, u32 port)
+{
+ struct tipc_member *m;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (m && tipc_group_is_receiver(m))
+ return m;
+ return NULL;
+}
+
+static struct tipc_member *tipc_group_find_node(struct tipc_group *grp,
+ u32 node)
+{
+ struct tipc_member *m;
+ struct rb_node *n;
+
+ for (n = rb_first(&grp->members); n; n = rb_next(n)) {
+ m = container_of(n, struct tipc_member, tree_node);
+ if (m->node == node)
+ return m;
+ }
+ return NULL;
+}
+
+static int tipc_group_add_to_tree(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ u64 nkey, key = (u64)m->node << 32 | m->port;
+ struct rb_node **n, *parent = NULL;
+ struct tipc_member *tmp;
+
+ n = &grp->members.rb_node;
+ while (*n) {
+		parent = *n;
+		tmp = container_of(parent, struct tipc_member, tree_node);
+ nkey = (u64)tmp->node << 32 | tmp->port;
+ if (key < nkey)
+ n = &(*n)->rb_left;
+ else if (key > nkey)
+ n = &(*n)->rb_right;
+ else
+ return -EEXIST;
+ }
+ rb_link_node(&m->tree_node, parent, n);
+ rb_insert_color(&m->tree_node, &grp->members);
+ return 0;
+}
+
+static struct tipc_member *tipc_group_create_member(struct tipc_group *grp,
+ u32 node, u32 port,
+ u32 instance, int state)
+{
+ struct tipc_member *m;
+ int ret;
+
+ m = kzalloc(sizeof(*m), GFP_ATOMIC);
+ if (!m)
+ return NULL;
+ INIT_LIST_HEAD(&m->list);
+ INIT_LIST_HEAD(&m->small_win);
+ __skb_queue_head_init(&m->deferredq);
+ m->group = grp;
+ m->node = node;
+ m->port = port;
+ m->instance = instance;
+ m->bc_acked = grp->bc_snd_nxt - 1;
+ ret = tipc_group_add_to_tree(grp, m);
+ if (ret < 0) {
+ kfree(m);
+ return NULL;
+ }
+ grp->member_cnt++;
+ tipc_nlist_add(&grp->dests, m->node);
+ m->state = state;
+ return m;
+}
+
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance)
+{
+ tipc_group_create_member(grp, node, port, instance, MBR_PUBLISHED);
+}
+
+static void tipc_group_delete_member(struct tipc_group *grp,
+ struct tipc_member *m)
+{
+ rb_erase(&m->tree_node, &grp->members);
+ grp->member_cnt--;
+
+ /* Check if we were waiting for replicast ack from this member */
+ if (grp->bc_ackers && less(m->bc_acked, grp->bc_snd_nxt - 1))
+ grp->bc_ackers--;
+
+ list_del_init(&m->list);
+ list_del_init(&m->small_win);
+ tipc_group_decr_active(grp, m);
+
+ /* If last member on a node, remove node from dest list */
+ if (!tipc_group_find_node(grp, m->node))
+ tipc_nlist_del(&grp->dests, m->node);
+
+ kfree(m);
+}
+
+struct tipc_nlist *tipc_group_dests(struct tipc_group *grp)
+{
+ return &grp->dests;
+}
+
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
+ int *scope)
+{
+ seq->type = grp->type;
+ seq->lower = grp->instance;
+ seq->upper = grp->instance;
+ *scope = grp->scope;
+}
+
+void tipc_group_update_member(struct tipc_member *m, int len)
+{
+ struct tipc_group *grp = m->group;
+ struct tipc_member *_m, *tmp;
+
+ if (!tipc_group_is_receiver(m))
+ return;
+
+ m->window -= len;
+
+ if (m->window >= ADV_IDLE)
+ return;
+
+ list_del_init(&m->small_win);
+
+ /* Sort member into small_window members' list */
+ list_for_each_entry_safe(_m, tmp, &grp->small_win, small_win) {
+ if (_m->window > m->window)
+ break;
+ }
+ list_add_tail(&m->small_win, &_m->small_win);
+}
+
+void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack)
+{
+ u16 prev = grp->bc_snd_nxt - 1;
+ struct tipc_member *m;
+ struct rb_node *n;
+ u16 ackers = 0;
+
+ for (n = rb_first(&grp->members); n; n = rb_next(n)) {
+ m = container_of(n, struct tipc_member, tree_node);
+ if (tipc_group_is_receiver(m)) {
+ tipc_group_update_member(m, len);
+ m->bc_acked = prev;
+ ackers++;
+ }
+ }
+
+	/* Mark number of acknowledgements to expect, if any */
+ if (ack)
+ grp->bc_ackers = ackers;
+ grp->bc_snd_nxt++;
+}
+
+bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
+ int len, struct tipc_member **mbr)
+{
+ struct sk_buff_head xmitq;
+ struct tipc_member *m;
+ int adv, state;
+
+ m = tipc_group_find_dest(grp, dnode, dport);
+ if (!tipc_group_is_receiver(m)) {
+ *mbr = NULL;
+ return false;
+ }
+ *mbr = m;
+
+ if (m->window >= len)
+ return false;
+
+ *grp->open = false;
+
+ /* If not fully advertised, do it now to prevent mutual blocking */
+ adv = m->advertised;
+ state = m->state;
+ if (state == MBR_JOINED && adv == ADV_IDLE)
+ return true;
+ if (state == MBR_ACTIVE && adv == ADV_ACTIVE)
+ return true;
+ if (state == MBR_PENDING && adv == ADV_IDLE)
+ return true;
+ __skb_queue_head_init(&xmitq);
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, &xmitq);
+ tipc_node_distr_xmit(grp->net, &xmitq);
+ return true;
+}
+
+bool tipc_group_bc_cong(struct tipc_group *grp, int len)
+{
+ struct tipc_member *m = NULL;
+
+ /* If prev bcast was replicast, reject until all receivers have acked */
+ if (grp->bc_ackers) {
+ *grp->open = false;
+ return true;
+ }
+ if (list_empty(&grp->small_win))
+ return false;
+
+ m = list_first_entry(&grp->small_win, struct tipc_member, small_win);
+ if (m->window >= len)
+ return false;
+
+ return tipc_group_cong(grp, m->node, m->port, len, &m);
+}
+
+/* tipc_group_sort_msg() - sort msg into queue by bcast sequence number
+ */
+static void tipc_group_sort_msg(struct sk_buff *skb, struct sk_buff_head *defq)
+{
+ struct tipc_msg *_hdr, *hdr = buf_msg(skb);
+ u16 bc_seqno = msg_grp_bc_seqno(hdr);
+ struct sk_buff *_skb, *tmp;
+ int mtyp = msg_type(hdr);
+
+	/* Bcast/mcast may be bypassed by ucast or other bcast - sort it in */
+ if (mtyp == TIPC_GRP_BCAST_MSG || mtyp == TIPC_GRP_MCAST_MSG) {
+ skb_queue_walk_safe(defq, _skb, tmp) {
+ _hdr = buf_msg(_skb);
+ if (!less(bc_seqno, msg_grp_bc_seqno(_hdr)))
+ continue;
+ __skb_queue_before(defq, _skb, skb);
+ return;
+ }
+		/* Bcast was not bypassed - add to tail */
+	}
+	/* Unicasts are never bypassed - always add to tail */
+ __skb_queue_tail(defq, skb);
+}
+
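The ordering relies on less()/more(), TIPC's wraparound-safe u16 sequence-number comparisons. A sketch of the usual definition (see msg.h for the authoritative helpers):

	static inline int less_eq(u16 left, u16 right)
	{
		return (u16)(right - left) < 32768;	/* distance mod 2^16 */
	}

	static inline int less(u16 left, u16 right)
	{
		return less_eq(left, right) && left != right;
	}

	/* less(65535, 2) is true: seqno 2 is "after" 65535 once the
	 * 16-bit counter wraps, which a plain '<' would get wrong.
	 */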
+/* tipc_group_filter_msg() - determine if we should accept arriving message
+ */
+void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = __skb_dequeue(inputq);
+ bool ack, deliver, update, leave = false;
+ struct sk_buff_head *defq;
+ struct tipc_member *m;
+ struct tipc_msg *hdr;
+ u32 node, port;
+ int mtyp, blks;
+
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ node = msg_orignode(hdr);
+ port = msg_origport(hdr);
+
+ if (!msg_in_group(hdr))
+ goto drop;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (!tipc_group_is_sender(m))
+ goto drop;
+
+ if (less(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
+ goto drop;
+
+ TIPC_SKB_CB(skb)->orig_member = m->instance;
+ defq = &m->deferredq;
+ tipc_group_sort_msg(skb, defq);
+
+ while ((skb = skb_peek(defq))) {
+ hdr = buf_msg(skb);
+ mtyp = msg_type(hdr);
+ blks = msg_blocks(hdr);
+ deliver = true;
+ ack = false;
+ update = false;
+
+ if (more(msg_grp_bc_seqno(hdr), m->bc_rcv_nxt))
+ break;
+
+ /* Decide what to do with message */
+ switch (mtyp) {
+ case TIPC_GRP_MCAST_MSG:
+ if (msg_nameinst(hdr) != grp->instance) {
+ update = true;
+ deliver = false;
+ }
+ fallthrough;
+ case TIPC_GRP_BCAST_MSG:
+ m->bc_rcv_nxt++;
+ ack = msg_grp_bc_ack_req(hdr);
+ break;
+ case TIPC_GRP_UCAST_MSG:
+ break;
+ case TIPC_GRP_MEMBER_EVT:
+ if (m->state == MBR_LEAVING)
+ leave = true;
+ if (!grp->events)
+ deliver = false;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute decisions */
+ __skb_dequeue(defq);
+ if (deliver)
+ __skb_queue_tail(inputq, skb);
+ else
+ kfree_skb(skb);
+
+ if (ack)
+ tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
+
+ if (leave) {
+ __skb_queue_purge(defq);
+ tipc_group_delete_member(grp, m);
+ break;
+ }
+ if (!update)
+ continue;
+
+ tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
+ }
+ return;
+drop:
+ kfree_skb(skb);
+}
+
+void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
+ u32 port, struct sk_buff_head *xmitq)
+{
+ struct list_head *active = &grp->active;
+ int max_active = grp->max_active;
+ int reclaim_limit = max_active * 3 / 4;
+ int active_cnt = grp->active_cnt;
+ struct tipc_member *m, *rm, *pm;
+
+ m = tipc_group_find_member(grp, node, port);
+ if (!m)
+ return;
+
+ m->advertised -= blks;
+
+ switch (m->state) {
+ case MBR_JOINED:
+ /* First, decide if member can go active */
+ if (active_cnt <= max_active) {
+ m->state = MBR_ACTIVE;
+ list_add_tail(&m->list, active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ } else {
+ m->state = MBR_PENDING;
+ list_add_tail(&m->list, &grp->pending);
+ }
+
+ if (active_cnt < reclaim_limit)
+ break;
+
+ /* Reclaim from oldest active member, if possible */
+ if (!list_empty(active)) {
+ rm = list_first_entry(active, struct tipc_member, list);
+ rm->state = MBR_RECLAIMING;
+ list_del_init(&rm->list);
+ tipc_group_proto_xmit(grp, rm, GRP_RECLAIM_MSG, xmitq);
+ break;
+ }
+		/* Nobody to reclaim from - revert oldest pending to JOINED */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ list_del_init(&pm->list);
+ pm->state = MBR_JOINED;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_ACTIVE:
+ if (!list_is_last(&m->list, &grp->active))
+ list_move_tail(&m->list, &grp->active);
+ if (m->advertised > (ADV_ACTIVE * 3 / 4))
+ break;
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_REMITTED:
+ if (m->advertised > ADV_IDLE)
+ break;
+ m->state = MBR_JOINED;
+ grp->active_cnt--;
+ if (m->advertised < ADV_IDLE) {
+ pr_warn_ratelimited("Rcv unexpected msg after REMIT\n");
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ }
+
+ if (list_empty(&grp->pending))
+ return;
+
+ /* Set oldest pending member to active and advertise */
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ pm->state = MBR_ACTIVE;
+ list_move_tail(&pm->list, &grp->active);
+ grp->active_cnt++;
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ break;
+ case MBR_RECLAIMING:
+ case MBR_JOINING:
+ case MBR_LEAVING:
+ default:
+ break;
+ }
+}
+
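The switch above is easier to follow against a summary of the flow-control states it drives; this is one reading of the code, not an authoritative diagram:

	/*
	 * JOINED  --(active slot free)--> ACTIVE     (ADV_ACTIVE window)
	 * JOINED  --(no slot free)------> PENDING    (queued on grp->pending)
	 * ACTIVE  --(pool crowded)------> RECLAIMING (sent GRP_RECLAIM_MSG)
	 * RECLAIMING --(REMIT, msgs in flight)--> REMITTED
	 * REMITTED --(advertised <= ADV_IDLE)---> JOINED, and the oldest
	 *                                         PENDING member is promoted
	 */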
+static void tipc_group_create_event(struct tipc_group *grp,
+ struct tipc_member *m,
+ u32 event, u16 seqno,
+ struct sk_buff_head *inputq)
+{
+	u32 dnode = tipc_own_addr(grp->net);
+ struct tipc_event evt;
+ struct sk_buff *skb;
+ struct tipc_msg *hdr;
+
+ memset(&evt, 0, sizeof(evt));
+ evt.event = event;
+ evt.found_lower = m->instance;
+ evt.found_upper = m->instance;
+ evt.port.ref = m->port;
+ evt.port.node = m->node;
+ evt.s.seq.type = grp->type;
+ evt.s.seq.lower = m->instance;
+ evt.s.seq.upper = m->instance;
+
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_GRP_MEMBER_EVT,
+ GROUP_H_SIZE, sizeof(evt), dnode, m->node,
+ grp->portid, m->port, 0);
+ if (!skb)
+ return;
+
+ hdr = buf_msg(skb);
+ msg_set_nametype(hdr, grp->type);
+ msg_set_grp_evt(hdr, event);
+ msg_set_dest_droppable(hdr, true);
+ msg_set_grp_bc_seqno(hdr, seqno);
+ memcpy(msg_data(hdr), &evt, sizeof(evt));
+ TIPC_SKB_CB(skb)->orig_member = m->instance;
+ __skb_queue_tail(inputq, skb);
+}
+
+static void tipc_group_proto_xmit(struct tipc_group *grp, struct tipc_member *m,
+ int mtyp, struct sk_buff_head *xmitq)
+{
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ int adv = 0;
+
+ skb = tipc_msg_create(GROUP_PROTOCOL, mtyp, INT_H_SIZE, 0,
+ m->node, tipc_own_addr(grp->net),
+ m->port, grp->portid, 0);
+ if (!skb)
+ return;
+
+ if (m->state == MBR_ACTIVE)
+ adv = ADV_ACTIVE - m->advertised;
+ else if (m->state == MBR_JOINED || m->state == MBR_PENDING)
+ adv = ADV_IDLE - m->advertised;
+
+ hdr = buf_msg(skb);
+
+ if (mtyp == GRP_JOIN_MSG) {
+ msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
+ msg_set_adv_win(hdr, adv);
+ m->advertised += adv;
+ } else if (mtyp == GRP_LEAVE_MSG) {
+ msg_set_grp_bc_syncpt(hdr, grp->bc_snd_nxt);
+ } else if (mtyp == GRP_ADV_MSG) {
+ msg_set_adv_win(hdr, adv);
+ m->advertised += adv;
+ } else if (mtyp == GRP_ACK_MSG) {
+ msg_set_grp_bc_acked(hdr, m->bc_rcv_nxt);
+ } else if (mtyp == GRP_REMIT_MSG) {
+ msg_set_grp_remitted(hdr, m->window);
+ }
+ msg_set_dest_droppable(hdr, true);
+ __skb_queue_tail(xmitq, skb);
+}
+
+void tipc_group_proto_rcv(struct tipc_group *grp, bool *usr_wakeup,
+ struct tipc_msg *hdr, struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ u32 node = msg_orignode(hdr);
+ u32 port = msg_origport(hdr);
+ struct tipc_member *m, *pm;
+ u16 remitted, in_flight;
+
+ if (!grp)
+ return;
+
+ if (grp->scope == TIPC_NODE_SCOPE && node != tipc_own_addr(grp->net))
+ return;
+
+ m = tipc_group_find_member(grp, node, port);
+
+ switch (msg_type(hdr)) {
+ case GRP_JOIN_MSG:
+ if (!m)
+ m = tipc_group_create_member(grp, node, port,
+ 0, MBR_JOINING);
+ if (!m)
+ return;
+ m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+ m->bc_rcv_nxt = m->bc_syncpt;
+ m->window += msg_adv_win(hdr);
+
+ /* Wait until PUBLISH event is received if necessary */
+ if (m->state != MBR_PUBLISHED)
+ return;
+
+ /* Member can be taken into service */
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_ADV_MSG, xmitq);
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
+ return;
+ case GRP_LEAVE_MSG:
+ if (!m)
+ return;
+ m->bc_syncpt = msg_grp_bc_syncpt(hdr);
+ list_del_init(&m->list);
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_syncpt, inputq);
+ return;
+ case GRP_ADV_MSG:
+ if (!m)
+ return;
+ m->window += msg_adv_win(hdr);
+ tipc_group_open(m, usr_wakeup);
+ return;
+ case GRP_ACK_MSG:
+ if (!m)
+ return;
+ m->bc_acked = msg_grp_bc_acked(hdr);
+ if (--grp->bc_ackers)
+ return;
+ list_del_init(&m->small_win);
+ *m->group->open = true;
+ *usr_wakeup = true;
+ tipc_group_update_member(m, 0);
+ return;
+ case GRP_RECLAIM_MSG:
+ if (!m)
+ return;
+ tipc_group_proto_xmit(grp, m, GRP_REMIT_MSG, xmitq);
+ m->window = ADV_IDLE;
+ tipc_group_open(m, usr_wakeup);
+ return;
+ case GRP_REMIT_MSG:
+ if (!m || m->state != MBR_RECLAIMING)
+ return;
+
+ remitted = msg_grp_remitted(hdr);
+
+		/* Messages preceding the REMIT are still in the receive queue */
+ if (m->advertised > remitted) {
+ m->state = MBR_REMITTED;
+ in_flight = m->advertised - remitted;
+ m->advertised = ADV_IDLE + in_flight;
+ return;
+ }
+ /* This should never happen */
+ if (m->advertised < remitted)
+ pr_warn_ratelimited("Unexpected REMIT msg\n");
+
+ /* All messages preceding the REMIT have been read */
+ m->state = MBR_JOINED;
+ grp->active_cnt--;
+ m->advertised = ADV_IDLE;
+
+ /* Set oldest pending member to active and advertise */
+ if (list_empty(&grp->pending))
+ return;
+ pm = list_first_entry(&grp->pending, struct tipc_member, list);
+ pm->state = MBR_ACTIVE;
+ list_move_tail(&pm->list, &grp->active);
+ grp->active_cnt++;
+ if (pm->advertised <= (ADV_ACTIVE * 3 / 4))
+ tipc_group_proto_xmit(grp, pm, GRP_ADV_MSG, xmitq);
+ return;
+ default:
+ pr_warn("Received unknown GROUP_PROTO message\n");
+ }
+}
+
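The GRP_REMIT accounting can be sanity-checked with concrete numbers; the block counts below are illustrative, not real ADV_* values:

	#include <stdio.h>

	int main(void)
	{
		int adv_idle = 16;	/* assumed ADV_IDLE */
		int advertised = 40;	/* blocks granted before RECLAIM */
		int remitted = 30;	/* window the member reports back */

		if (advertised > remitted) {
			/* 10 messages still unread: park in MBR_REMITTED */
			int in_flight = advertised - remitted;

			printf("REMITTED, advertised = %d\n",
			       adv_idle + in_flight);
		} else {
			/* everything read: straight back to MBR_JOINED */
			printf("JOINED, advertised = %d\n", adv_idle);
		}
		return 0;
	}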
+/* tipc_group_member_evt() - receive and handle a member up/down event
+ */
+void tipc_group_member_evt(struct tipc_group *grp,
+ bool *usr_wakeup,
+ int *sk_rcvbuf,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct tipc_event *evt = (void *)msg_data(hdr);
+ u32 instance = evt->found_lower;
+ u32 node = evt->port.node;
+ u32 port = evt->port.ref;
+ int event = evt->event;
+ struct tipc_member *m;
+ struct net *net;
+ u32 self;
+
+ if (!grp)
+ return;
+
+ net = grp->net;
+ self = tipc_own_addr(net);
+ if (!grp->loopback && node == self && port == grp->portid)
+ return;
+
+ m = tipc_group_find_member(grp, node, port);
+
+ switch (event) {
+ case TIPC_PUBLISHED:
+ /* Send and wait for arrival of JOIN message if necessary */
+ if (!m) {
+ m = tipc_group_create_member(grp, node, port, instance,
+ MBR_PUBLISHED);
+ if (!m)
+ break;
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+ break;
+ }
+
+ if (m->state != MBR_JOINING)
+ break;
+
+ /* Member can be taken into service */
+ m->instance = instance;
+ m->state = MBR_JOINED;
+ tipc_group_open(m, usr_wakeup);
+ tipc_group_update_member(m, 0);
+ tipc_group_proto_xmit(grp, m, GRP_JOIN_MSG, xmitq);
+ tipc_group_create_event(grp, m, TIPC_PUBLISHED,
+ m->bc_syncpt, inputq);
+ break;
+ case TIPC_WITHDRAWN:
+ if (!m)
+ break;
+
+ tipc_group_decr_active(grp, m);
+ m->state = MBR_LEAVING;
+ list_del_init(&m->list);
+ tipc_group_open(m, usr_wakeup);
+
+ /* Only send event if no LEAVE message can be expected */
+ if (!tipc_node_is_up(net, node))
+ tipc_group_create_event(grp, m, TIPC_WITHDRAWN,
+ m->bc_rcv_nxt, inputq);
+ break;
+ default:
+ break;
+ }
+ *sk_rcvbuf = tipc_group_rcvbuf_limit(grp);
+}
+
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
+{
+ struct nlattr *group = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_GROUP);
+
+ if (!group)
+ return -EMSGSIZE;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
+ grp->type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
+ grp->instance) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT,
+ grp->bc_snd_nxt))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_NODE_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_NODE_SCOPE))
+ goto group_msg_cancel;
+
+ if (grp->scope == TIPC_CLUSTER_SCOPE)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE))
+ goto group_msg_cancel;
+
+ if (*grp->open)
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_GROUP_OPEN))
+ goto group_msg_cancel;
+
+ nla_nest_end(skb, group);
+ return 0;
+
+group_msg_cancel:
+ nla_nest_cancel(skb, group);
+ return -1;
+}
diff --git a/net/tipc/group.h b/net/tipc/group.h
new file mode 100644
index 000000000000..ea4c3be64c78
--- /dev/null
+++ b/net/tipc/group.h
@@ -0,0 +1,77 @@
+/*
+ * net/tipc/group.h: Include file for TIPC group unicast/multicast functions
+ *
+ * Copyright (c) 2017, Ericsson AB
+ * Copyright (c) 2020, Red Hat Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_GROUP_H
+#define _TIPC_GROUP_H
+
+#include "core.h"
+
+struct tipc_group;
+struct tipc_member;
+struct tipc_msg;
+
+struct tipc_group *tipc_group_create(struct net *net, u32 portid,
+ struct tipc_group_req *mreq,
+ bool *group_is_open);
+void tipc_group_join(struct net *net, struct tipc_group *grp, int *sk_rcv_buf);
+void tipc_group_delete(struct net *net, struct tipc_group *grp);
+void tipc_group_add_member(struct tipc_group *grp, u32 node,
+ u32 port, u32 instance);
+struct tipc_nlist *tipc_group_dests(struct tipc_group *grp);
+void tipc_group_self(struct tipc_group *grp, struct tipc_service_range *seq,
+ int *scope);
+u32 tipc_group_exclude(struct tipc_group *grp);
+void tipc_group_filter_msg(struct tipc_group *grp,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_member_evt(struct tipc_group *grp, bool *wakeup,
+ int *sk_rcvbuf, struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_proto_rcv(struct tipc_group *grp, bool *wakeup,
+ struct tipc_msg *hdr,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq);
+void tipc_group_update_bc_members(struct tipc_group *grp, int len, bool ack);
+bool tipc_group_cong(struct tipc_group *grp, u32 dnode, u32 dport,
+ int len, struct tipc_member **m);
+bool tipc_group_bc_cong(struct tipc_group *grp, int len);
+void tipc_group_update_rcv_win(struct tipc_group *grp, int blks, u32 node,
+ u32 port, struct sk_buff_head *xmitq);
+u16 tipc_group_bc_snd_nxt(struct tipc_group *grp);
+void tipc_group_update_member(struct tipc_member *m, int len);
+int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb);
+#endif
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index e8c16718e3fa..b9ad0434c3cd 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,6 +42,8 @@
#include "core.h"
#include "bearer.h"
+#define TIPC_MAX_IB_LINK_WIN 500
+
 /* convert InfiniBand address (media address format) to string */
static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
int str_size)
@@ -65,7 +67,7 @@ static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
/* Convert raw InfiniBand address format to media addr format */
static int tipc_ib_raw2addr(struct tipc_bearer *b,
struct tipc_media_addr *addr,
- char *msg)
+ const char *msg)
{
memset(addr, 0, sizeof(*addr));
memcpy(addr->value, msg, INFINIBAND_ALEN);
@@ -94,7 +96,8 @@ struct tipc_media ib_media_info = {
.raw2addr = tipc_ib_raw2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
- .window = TIPC_DEF_LINK_WIN,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_MAX_IB_LINK_WIN,
.type_id = TIPC_MEDIA_TYPE_IB,
.hwaddr_len = INFINIBAND_ALEN,
.name = "ib"
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 60820dc35a08..931f55f781a1 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -43,6 +43,8 @@
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
+#include "trace.h"
+#include "crypto.h"
#include <linux/pkt_sched.h>
@@ -80,10 +82,7 @@ struct tipc_stats {
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
* @name: link name character string
- * @media_addr: media address to use when sending messages over link
- * @timer: link timer
* @net: pointer to namespace struct
- * @refcnt: reference counter for permanent references (owner node & timer)
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
* @bearer_id: local bearer id used by link
@@ -92,34 +91,48 @@ struct tipc_stats {
* @state: current state of link FSM
* @peer_caps: bitmap describing capabilities of peer node
* @silent_intv_cnt: # of timer intervals without any reception from peer
- * @proto_msg: template for control messages generated by link
- * @pmsg: convenience pointer to "proto_msg" field
* @priority: current link priority
* @net_plane: current link network plane ('A' through 'H')
* @mon_state: cookie with information needed by link monitor
- * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
- * @exp_msg_count: # of tunnelled messages expected during link changeover
- * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
* @mtu: current maximum packet size for this link
* @advertised_mtu: advertised own mtu when link is being established
- * @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
- * @snt_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
- * @stale_count: # of identical retransmit requests made by peer
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
- * @deferred_queue: deferred queue saved OOS b'cast message received from node
- * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
* @inputq: buffer queue for messages to be delivered upwards
* @namedq: buffer queue for name table messages to be delivered upwards
- * @next_out: ptr to first unsent outbound message in queue
* @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
- * @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
- * @bc_rcvr: marks that this is a broadcast receiver link
* @stats: collects statistics regarding link activity
+ * @session: session to be used by link
+ * @snd_nxt_state: next send seq number
+ * @rcv_nxt_state: next rcv seq number
+ * @in_session: have received ACTIVATE_MSG from peer
+ * @active: link is active
+ * @if_name: associated interface name
+ * @rst_cnt: link reset counter
+ * @drop_point: seq number for failover handling (FIXME)
+ * @failover_reasm_skb: saved failover msg ptr (FIXME)
+ * @failover_deferdq: deferred message queue for failover processing (FIXME)
+ * @transmq: the link's transmit queue
+ * @backlog: link's backlog by priority (importance)
+ * @snd_nxt: next sequence number to be used
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @deferdq: deferred receive queue
+ * @window: sliding window size for congestion handling
+ * @min_win: minimal send window to be used by link
+ * @ssthresh: slow start threshold for congestion handling
+ * @max_win: maximal send window to be used by link
+ * @cong_acks: congestion acks for congestion avoidance (FIXME)
+ * @checkpoint: seq number for congestion window size handling
+ * @reasm_tnlmsg: fragmentation/reassembly area for tunnel protocol message
+ * @last_gap: last gap ack blocks for bcast (FIXME)
+ * @last_ga: ptr to gap ack blocks
+ * @bc_rcvlink: the peer specific link used for broadcast reception
+ * @bc_sndlink: the namespace global link used for broadcast sending
+ * @nack_state: bcast nack state
+ * @bc_peer_is_up: peer has acked the bcast init msg
*/
struct tipc_link {
u32 addr;
@@ -127,14 +140,17 @@ struct tipc_link {
struct net *net;
/* Management and link supervision data */
- u32 peer_session;
- u32 session;
+ u16 peer_session;
+ u16 session;
+ u16 snd_nxt_state;
+ u16 rcv_nxt_state;
u32 peer_bearer_id;
u32 bearer_id;
u32 tolerance;
u32 abort_limit;
u32 state;
u16 peer_caps;
+ bool in_session;
bool active;
u32 silent_intv_cnt;
char if_name[TIPC_MAX_IF_NAME];
@@ -146,6 +162,7 @@ struct tipc_link {
/* Failover/synch */
u16 drop_point;
struct sk_buff *failover_reasm_skb;
+ struct sk_buff_head failover_deferdq;
/* Max packet negotiation */
u16 mtu;
@@ -157,11 +174,9 @@ struct tipc_link {
struct {
u16 len;
u16 limit;
+ struct sk_buff *target_bskb;
} backlog[5];
u16 snd_nxt;
- u16 last_retransm;
- u16 window;
- u32 stale_count;
/* Reception */
u16 rcv_nxt;
@@ -172,18 +187,24 @@ struct tipc_link {
/* Congestion handling */
struct sk_buff_head wakeupq;
+ u16 window;
+ u16 min_win;
+ u16 ssthresh;
+ u16 max_win;
+ u16 cong_acks;
+ u16 checkpoint;
/* Fragmentation/reassembly */
struct sk_buff *reasm_buf;
+ struct sk_buff *reasm_tnlmsg;
/* Broadcast */
u16 ackers;
u16 acked;
+ u16 last_gap;
+ struct tipc_gap_ack_blks *last_ga;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
- unsigned long prev_retr;
- u16 prev_from;
- u16 prev_to;
u8 nack_state;
bool bc_peer_is_up;
@@ -205,17 +226,8 @@ enum {
BC_NACK_SND_SUPPRESS,
};
-#define TIPC_BC_RETR_LIMIT 10 /* [ms] */
-
-/*
- * Interval between NACKs when packets arrive out of order
- */
-#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-
-/* Wildcard value for link session numbers. When it is known that
- * peer endpoint is down, any session number must be accepted.
- */
-#define ANY_SESSION 0x10000
+#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
+#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
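
Note that both macros expand to expressions containing jiffies, so each use computes a fresh deadline at assignment time rather than a constant fixed at boot:

	/* every expansion reads jiffies anew:
	 *	TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
	 * is equivalent to
	 *	TIPC_SKB_CB(skb)->nxt_retr = jiffies + msecs_to_jiffies(10);
	 * evaluated at that moment.
	 */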
/* Link FSM states:
*/
@@ -229,31 +241,33 @@ enum {
LINK_SYNCHING = 0xc << 24
};
-/* Link FSM state checking routines
- */
-static int link_is_up(struct tipc_link *l)
-{
- return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
-}
-
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
- u16 rcvgap, int tolerance, int priority,
+ bool probe_reply, u16 rcvgap,
+ int tolerance, int priority,
struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
struct sk_buff_head *xmitq);
-static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
-
+static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
+ struct tipc_link *l, u8 start_index);
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
+static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
+ u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ bool *retransmitted, int *rc);
+static void tipc_link_update_cwin(struct tipc_link *l, int released,
+ bool retransmitted);
/*
* Simple non-static link routines (i.e. referenced outside this file)
*/
bool tipc_link_is_up(struct tipc_link *l)
{
- return link_is_up(l);
+ return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
bool tipc_link_peer_is_down(struct tipc_link *l)
@@ -296,11 +310,6 @@ static bool link_is_bc_rcvlink(struct tipc_link *l)
return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
-int tipc_link_is_active(struct tipc_link *l)
-{
- return l->active;
-}
-
void tipc_link_set_active(struct tipc_link *l, bool active)
{
l->active = active;
@@ -311,9 +320,14 @@ u32 tipc_link_id(struct tipc_link *l)
return l->peer_bearer_id << 16 | l->bearer_id;
}
-int tipc_link_window(struct tipc_link *l)
+int tipc_link_min_win(struct tipc_link *l)
{
- return l->window;
+ return l->min_win;
+}
+
+int tipc_link_max_win(struct tipc_link *l)
+{
+ return l->max_win;
}
int tipc_link_prio(struct tipc_link *l)
@@ -336,6 +350,16 @@ char tipc_link_plane(struct tipc_link *l)
return l->net_plane;
}
+struct net *tipc_link_net(struct tipc_link *l)
+{
+ return l->net;
+}
+
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
+{
+ l->peer_caps = capabilities;
+}
+
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
struct tipc_link *uc_l,
struct sk_buff_head *xmitq)
@@ -357,10 +381,12 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
snd_l->ackers--;
rcv_l->bc_peer_is_up = true;
rcv_l->state = LINK_ESTABLISHED;
- tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
+ tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
+ trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
if (!snd_l->ackers) {
+ trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
tipc_link_reset(snd_l);
snd_l->state = LINK_RESET;
__skb_queue_purge(xmitq);
@@ -372,7 +398,7 @@ int tipc_link_bc_peers(struct tipc_link *l)
return l->ackers;
}
-u16 link_bc_rcv_gap(struct tipc_link *l)
+static u16 link_bc_rcv_gap(struct tipc_link *l)
{
struct sk_buff *skb = skb_peek(&l->deferdq);
u16 gap = 0;
@@ -394,6 +420,15 @@ int tipc_link_mtu(struct tipc_link *l)
return l->mtu;
}
+int tipc_link_mss(struct tipc_link *l)
+{
+#ifdef CONFIG_TIPC_CRYPTO
+ return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
+#else
+ return l->mtu - INT_H_SIZE;
+#endif
+}
+
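tipc_link_mss() reserves header overhead (plus encryption overhead under CONFIG_TIPC_CRYPTO) out of the link MTU before bundling. A quick arithmetic sketch; INT_H_SIZE is 40 per msg.h, while the EMSG_OVERHEAD value is assumed here since it is defined in crypto.h:

	#include <stdio.h>

	int main(void)
	{
		int mtu = 1500;			/* Ethernet-sized link MTU */
		int int_h_size = 40;		/* internal header, msg.h */
		int emsg_overhead = 48;		/* assumed crypto overhead */

		printf("mss, plain : %d\n", mtu - int_h_size);
		printf("mss, crypto: %d\n", mtu - int_h_size - emsg_overhead);
		return 0;
	}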
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
return l->rcv_nxt;
@@ -409,18 +444,23 @@ char *tipc_link_name(struct tipc_link *l)
return l->name;
}
+u32 tipc_link_state(struct tipc_link *l)
+{
+ return l->state;
+}
+
/**
* tipc_link_create - create a new link
- * @n: pointer to associated node
+ * @net: pointer to associated network namespace
* @if_name: associated interface name
* @bearer_id: id (index) of associated bearer
* @tolerance: link tolerance to be used by link
* @net_plane: network plane (A,B,c..) this link belongs to
* @mtu: mtu to be advertised by link
* @priority: priority to be used by link
- * @window: send window to be used by link
+ * @min_win: minimal send window to be used by link
+ * @max_win: maximal send window to be used by link
* @session: session to be used by link
- * @ownnode: identity of own node
* @peer: node id of peer node
* @peer_caps: bitmap describing peer node capabilities
* @bc_sndlink: the namespace global link used for broadcast sending
@@ -428,19 +468,23 @@ char *tipc_link_name(struct tipc_link *l)
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
+ * @self: local unicast link id
+ * @peer_id: 128-bit ID of peer
*
- * Returns true if link was created, otherwise false
+ * Return: true if link was created, otherwise false
*/
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
int tolerance, char net_plane, u32 mtu, int priority,
- int window, u32 session, u32 ownnode, u32 peer,
- u16 peer_caps,
+ u32 min_win, u32 max_win, u32 session, u32 self,
+ u32 peer, u8 *peer_id, u16 peer_caps,
struct tipc_link *bc_sndlink,
struct tipc_link *bc_rcvlink,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link **link)
{
+ char peer_str[NODE_ID_STR_LEN] = {0,};
+ char self_str[NODE_ID_STR_LEN] = {0,};
struct tipc_link *l;
l = kzalloc(sizeof(*l), GFP_ATOMIC);
@@ -449,22 +493,31 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
*link = l;
l->session = session;
- /* Note: peer i/f name is completed by reset/activate message */
- sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
- tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
- if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+ /* Set link name for unicast links only */
+ if (peer_id) {
+ if (tipc_nodeid2string(self_str, tipc_own_id(net)) > NODE_ID_LEN)
+ sprintf(self_str, "%x", self);
+ if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN)
+ sprintf(peer_str, "%x", peer);
+ }
+ /* Peer i/f name will be completed by reset/activate message */
+ snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
+ self_str, if_name, peer_str);
+
strcpy(l->if_name, if_name);
l->addr = peer;
l->peer_caps = peer_caps;
l->net = net;
- l->peer_session = ANY_SESSION;
+ l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
l->priority = priority;
- tipc_link_set_queue_limits(l, window);
+ tipc_link_set_queue_limits(l, min_win, max_win);
l->ackers = 1;
l->bc_sndlink = bc_sndlink;
l->bc_rcvlink = bc_rcvlink;
@@ -474,6 +527,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
__skb_queue_head_init(&l->transmq);
__skb_queue_head_init(&l->backlogq);
__skb_queue_head_init(&l->deferdq);
+ __skb_queue_head_init(&l->failover_deferdq);
skb_queue_head_init(&l->wakeupq);
skb_queue_head_init(l->inputq);
return true;
@@ -481,17 +535,23 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
/**
* tipc_link_bc_create - create new link to be used for broadcast
- * @n: pointer to associated node
- * @mtu: mtu to be used
- * @window: send window to be used
+ * @net: pointer to associated network namespace
+ * @mtu: mtu to be used initially if no peers
+ * @min_win: minimal send window to be used by link
+ * @max_win: maximal send window to be used by link
* @inputq: queue to put messages ready for delivery
* @namedq: queue to put binding table update messages ready for delivery
* @link: return value, pointer to put the created link
+ * @ownnode: identity of own node
+ * @peer: node id of peer node
+ * @peer_id: 128-bit ID of peer
+ * @peer_caps: bitmap describing peer node capabilities
+ * @bc_sndlink: the namespace global link used for broadcast sending
*
- * Returns true if link was created, otherwise false
+ * Return: true if link was created, otherwise false
*/
-bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
- int mtu, int window, u16 peer_caps,
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
+ int mtu, u32 min_win, u32 max_win, u16 peer_caps,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link *bc_sndlink,
@@ -499,13 +559,24 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
{
struct tipc_link *l;
- if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
- 0, ownnode, peer, peer_caps, bc_sndlink,
- NULL, inputq, namedq, link))
+ if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
+ max_win, 0, ownnode, peer, NULL, peer_caps,
+ bc_sndlink, NULL, inputq, namedq, link))
return false;
l = *link;
- strcpy(l->name, tipc_bclink_name);
+ if (peer_id) {
+ char peer_str[NODE_ID_STR_LEN] = {0,};
+
+ if (tipc_nodeid2string(peer_str, peer_id) > NODE_ID_LEN)
+ sprintf(peer_str, "%x", peer);
+ /* Broadcast receiver link name: "broadcast-link:<peer>" */
+ snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
+ peer_str);
+ } else {
+ strcpy(l->name, tipc_bclink_name);
+ }
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
tipc_link_reset(l);
l->state = LINK_RESET;
l->ackers = 0;
@@ -517,7 +588,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
/* Disable replicast if even a single peer doesn't support it */
if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
- tipc_bcast_disable_rcast(net);
+ tipc_bcast_toggle_rcast(net, false);
return true;
}
@@ -530,6 +601,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
int rc = 0;
+ int old_state = l->state;
switch (l->state) {
case LINK_RESETTING:
@@ -557,6 +629,7 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
break;
case LINK_FAILOVER_BEGIN_EVT:
l->state = LINK_FAILINGOVER;
+ break;
case LINK_FAILURE_EVT:
case LINK_RESET_EVT:
case LINK_ESTABLISH_EVT:
@@ -676,10 +749,12 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
default:
pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
}
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
illegal_evt:
pr_err("Illegal FSM event %x in state %x on link %s\n",
evt, l->state, l->name);
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
}
@@ -704,7 +779,7 @@ static void link_profile_stats(struct tipc_link *l)
if (msg_user(msg) == MSG_FRAGMENTER) {
if (msg_type(msg) != FIRST_FRAGMENT)
return;
- length = msg_size(msg_get_wrapped(msg));
+ length = msg_size(msg_inner_hdr(msg));
}
l->stats.msg_lengths_total += length;
l->stats.msg_length_counts++;
@@ -724,6 +799,18 @@ static void link_profile_stats(struct tipc_link *l)
l->stats.msg_length_profile[6]++;
}
+/**
+ * tipc_link_too_silent - check if link is "too silent"
+ * @l: tipc link to be checked
+ *
+ * Return: true if the link 'silent_intv_cnt' is about to reach the
+ * 'abort_limit' value, otherwise false
+ */
+bool tipc_link_too_silent(struct tipc_link *l)
+{
+ return (l->silent_intv_cnt + 2 > l->abort_limit);
+}
+
/* tipc_link_timeout - perform periodic task as instructed from node timeout
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
@@ -737,6 +824,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
u16 bc_acked = l->bc_rcvlink->acked;
struct tipc_mon_state *mstate = &l->mon_state;
+ trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
+ trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
switch (l->state) {
case LINK_ESTABLISHED:
case LINK_SYNCHING:
@@ -749,11 +838,16 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
state |= l->bc_rcvlink->rcv_unacked;
state |= l->rcv_unacked;
state |= !skb_queue_empty(&l->transmq);
- state |= !skb_queue_empty(&l->deferdq);
probe = mstate->probing;
probe |= l->silent_intv_cnt;
if (probe || mstate->monitoring)
l->silent_intv_cnt++;
+ probe |= !skb_queue_empty(&l->deferdq);
+ if (l->snd_nxt == l->checkpoint) {
+ tipc_link_update_cwin(l, 0, 0);
+ probe = true;
+ }
+ l->checkpoint = l->snd_nxt;
break;
case LINK_RESET:
setup = l->rst_cnt++ <= 4;
@@ -773,7 +867,7 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
}
if (state || probe || setup)
- tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
+ tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
return rc;
}
@@ -799,6 +893,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb_queue_tail(&l->wakeupq, skb);
l->stats.link_congs++;
+ trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
return -ELINKCONG;
}
@@ -808,47 +903,96 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
* Wake up a number of waiting users, as permitted by available space
* in the send queue
*/
-void link_prepare_wakeup(struct tipc_link *l)
+static void link_prepare_wakeup(struct tipc_link *l)
{
+ struct sk_buff_head *wakeupq = &l->wakeupq;
+ struct sk_buff_head *inputq = l->inputq;
struct sk_buff *skb, *tmp;
- int imp, i = 0;
+ struct sk_buff_head tmpq;
+ int avail[5] = {0,};
+ int imp = 0;
+
+ __skb_queue_head_init(&tmpq);
- skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+ for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
+ avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
+
+ skb_queue_walk_safe(wakeupq, skb, tmp) {
imp = TIPC_SKB_CB(skb)->chain_imp;
- if (l->backlog[imp].len < l->backlog[imp].limit) {
- skb_unlink(skb, &l->wakeupq);
- skb_queue_tail(l->inputq, skb);
- } else if (i++ > 10) {
- break;
- }
+ if (avail[imp] <= 0)
+ continue;
+ avail[imp]--;
+ __skb_unlink(skb, wakeupq);
+ __skb_queue_tail(&tmpq, skb);
}
+
+ spin_lock_bh(&inputq->lock);
+ skb_queue_splice_tail(&tmpq, inputq);
+ spin_unlock_bh(&inputq->lock);
+
+}
+
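The rewrite above precomputes one budget per importance level and splices all affordable wakeups to the input queue in a single locked operation, instead of scanning and bailing out after roughly ten misses. A userspace-style sketch of the budgeting idea, ignoring the arrival-order walk across levels that the real queue preserves; the arrays stand in for the sk_buff queues:

	#define LEVELS 5	/* mirrors TIPC_SYSTEM_IMPORTANCE + 1 */

	static void budget_wakeups(const int limit[LEVELS],
				   const int len[LEVELS],
				   int waiting[LEVELS], int woken[LEVELS])
	{
		int avail[LEVELS];
		int imp;

		for (imp = 0; imp < LEVELS; imp++)
			avail[imp] = limit[imp] - len[imp];

		/* one pass: move every message whose level still has room */
		for (imp = 0; imp < LEVELS; imp++) {
			while (waiting[imp] > 0 && avail[imp] > 0) {
				avail[imp]--;
				waiting[imp]--;
				woken[imp]++;	/* spliced to inputq once */
			}
		}
	}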
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ * the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+ struct tipc_link *l)
+{
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ else
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}
void tipc_link_reset(struct tipc_link *l)
{
- l->peer_session = ANY_SESSION;
+ struct sk_buff_head list;
+ u32 imp;
+
+ __skb_queue_head_init(&list);
+
+ l->in_session = false;
+ /* Force re-synch of peer session number before establishing */
+ l->peer_session--;
l->session++;
l->mtu = l->advertised_mtu;
+
+ spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
+ spin_lock_bh(&l->inputq->lock);
+ skb_queue_splice_init(&list, l->inputq);
+ spin_unlock_bh(&l->inputq->lock);
+
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
- skb_queue_splice_init(&l->wakeupq, l->inputq);
__skb_queue_purge(&l->backlogq);
- l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
- l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
- l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
- l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
- l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+ __skb_queue_purge(&l->failover_deferdq);
+ for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+ l->backlog[imp].len = 0;
+ l->backlog[imp].target_bskb = NULL;
+ }
kfree_skb(l->reasm_buf);
+ kfree_skb(l->reasm_tnlmsg);
kfree_skb(l->failover_reasm_skb);
l->reasm_buf = NULL;
+ l->reasm_tnlmsg = NULL;
l->failover_reasm_skb = NULL;
l->rcv_unacked = 0;
l->snd_nxt = 1;
l->rcv_nxt = 1;
+ l->snd_nxt_state = 1;
+ l->rcv_nxt_state = 1;
l->acked = 0;
+ l->last_gap = 0;
+ kfree(l->last_ga);
+ l->last_ga = NULL;
l->silent_intv_cnt = 0;
l->rst_cnt = 0;
- l->stale_count = 0;
l->bc_peer_is_up = false;
memset(&l->mon_state, 0, sizeof(l->mon_state));
tipc_link_reset_stats(l);
@@ -856,39 +1000,50 @@ void tipc_link_reset(struct tipc_link *l)
/**
* tipc_link_xmit(): enqueue buffer list according to queue situation
- * @link: link to use
+ * @l: link to use
* @list: chain of buffers containing message
* @xmitq: returned list of packets to be sent by caller
*
* Consumes the buffer chain.
- * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
* Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
+ * Return: 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
*/
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff_head *xmitq)
{
- struct tipc_msg *hdr = buf_msg(skb_peek(list));
- unsigned int maxwin = l->window;
- int imp = msg_importance(hdr);
- unsigned int mtu = l->mtu;
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff_head *transmq = &l->transmq;
+ struct sk_buff *skb, *_skb;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
u16 seqno = l->snd_nxt;
- u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
- struct sk_buff_head *transmq = &l->transmq;
- struct sk_buff_head *backlogq = &l->backlogq;
- struct sk_buff *skb, *_skb, *bskb;
int pkt_cnt = skb_queue_len(list);
+ unsigned int mss = tipc_link_mss(l);
+ unsigned int cwin = l->window;
+ unsigned int mtu = l->mtu;
+ struct tipc_msg *hdr;
+ bool new_bundle;
int rc = 0;
+ int imp;
+ if (pkt_cnt <= 0)
+ return 0;
+
+ hdr = buf_msg(skb_peek(list));
if (unlikely(msg_size(hdr) > mtu)) {
- skb_queue_purge(list);
+ pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
+ skb_queue_len(list), msg_user(hdr),
+ msg_type(hdr), msg_size(hdr), mtu);
+ __skb_queue_purge(list);
return -EMSGSIZE;
}
+ imp = msg_importance(hdr);
/* Allow oversubscription of one data msg per source at congestion */
if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
if (imp == TIPC_SYSTEM_IMPORTANCE) {
pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
+ __skb_queue_purge(list);
return -ENOBUFS;
}
rc = link_schedule_user(l, hdr);
@@ -900,21 +1055,20 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
}
/* Prepare each packet for sending, and add to relevant queue: */
- while (skb_queue_len(list)) {
- skb = skb_peek(list);
- hdr = buf_msg(skb);
- msg_set_seqno(hdr, seqno);
- msg_set_ack(hdr, ack);
- msg_set_bcast_ack(hdr, bc_ack);
-
- if (likely(skb_queue_len(transmq) < maxwin)) {
+ while ((skb = __skb_dequeue(list))) {
+ if (likely(skb_queue_len(transmq) < cwin)) {
+ hdr = buf_msg(skb);
+ msg_set_seqno(hdr, seqno);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
_skb = skb_clone(skb, GFP_ATOMIC);
if (!_skb) {
- skb_queue_purge(list);
+ kfree_skb(skb);
+ __skb_queue_purge(list);
return -ENOBUFS;
}
- __skb_dequeue(list);
__skb_queue_tail(transmq, skb);
+ tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
@@ -922,35 +1076,86 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
seqno++;
continue;
}
- if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
- kfree_skb(__skb_dequeue(list));
- l->stats.sent_bundled++;
- continue;
- }
- if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
- kfree_skb(__skb_dequeue(list));
- __skb_queue_tail(backlogq, bskb);
- l->backlog[msg_importance(buf_msg(bskb))].len++;
- l->stats.sent_bundled++;
- l->stats.sent_bundles++;
+ if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
+ mss, l->addr, &new_bundle)) {
+ if (skb) {
+ /* Keep a ref. to the skb for next try */
+ l->backlog[imp].target_bskb = skb;
+ l->backlog[imp].len++;
+ __skb_queue_tail(backlogq, skb);
+ } else {
+ if (new_bundle) {
+ l->stats.sent_bundles++;
+ l->stats.sent_bundled++;
+ }
+ l->stats.sent_bundled++;
+ }
continue;
}
- l->backlog[imp].len += skb_queue_len(list);
+ l->backlog[imp].target_bskb = NULL;
+ l->backlog[imp].len += (1 + skb_queue_len(list));
+ __skb_queue_tail(backlogq, skb);
skb_queue_splice_tail_init(list, backlogq);
}
l->snd_nxt = seqno;
return rc;
}
-void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
+static void tipc_link_update_cwin(struct tipc_link *l, int released,
+ bool retransmitted)
+{
+ int bklog_len = skb_queue_len(&l->backlogq);
+ struct sk_buff_head *txq = &l->transmq;
+ int txq_len = skb_queue_len(txq);
+ u16 cwin = l->window;
+
+ /* Enter fast recovery */
+ if (unlikely(retransmitted)) {
+ l->ssthresh = max_t(u16, l->window / 2, 300);
+ l->window = min_t(u16, l->ssthresh, l->window);
+ return;
+ }
+ /* Enter slow start */
+ if (unlikely(!released)) {
+ l->ssthresh = max_t(u16, l->window / 2, 300);
+ l->window = l->min_win;
+ return;
+ }
+ /* Don't increase window if no pressure on the transmit queue */
+ if (txq_len + bklog_len < cwin)
+ return;
+
+ /* Don't increase window if there are holes in the transmit queue */
+ if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
+ return;
+
+ l->cong_acks += released;
+
+ /* Slow start */
+ if (cwin <= l->ssthresh) {
+ l->window = min_t(u16, cwin + released, l->max_win);
+ return;
+ }
+ /* Congestion avoidance */
+ if (l->cong_acks < cwin)
+ return;
+ l->window = min_t(u16, ++cwin, l->max_win);
+ l->cong_acks = 0;
+}
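
tipc_link_update_cwin() gives the link a TCP-style variable window: a retransmission halves the window into ssthresh (fast recovery), a round with nothing released falls back to min_win (slow start restart), and otherwise the window grows by the released count below ssthresh (slow start) or by one per window's worth of acks above it (congestion avoidance). A self-contained sketch of the same state machine; field and constant names mirror the patch, but the harness is hypothetical:

#include <stdio.h>

struct cwin_state {
    unsigned int window, min_win, max_win, ssthresh, cong_acks;
};

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void update_cwin(struct cwin_state *s, int released, int retransmitted,
                        int queue_pressure)
{
    if (retransmitted) {                       /* fast recovery */
        s->ssthresh = MAX(s->window / 2, 300);
        s->window = MIN(s->ssthresh, s->window);
        return;
    }
    if (!released) {                           /* restart slow start */
        s->ssthresh = MAX(s->window / 2, 300);
        s->window = s->min_win;
        return;
    }
    if (!queue_pressure)                       /* no pressure: keep window */
        return;

    s->cong_acks += released;
    if (s->window <= s->ssthresh) {            /* slow start */
        s->window = MIN(s->window + released, s->max_win);
        return;
    }
    if (s->cong_acks < s->window)              /* congestion avoidance */
        return;
    s->window = MIN(s->window + 1, s->max_win);
    s->cong_acks = 0;
}

int main(void)
{
    struct cwin_state s = { .window = 50, .min_win = 50,
                            .max_win = 8191, .ssthresh = 8191 };
    int i;

    for (i = 0; i < 5; i++) {                  /* slow start doubles quickly */
        update_cwin(&s, s.window, 0, 1);
        printf("win=%u\n", s.window);
    }
    update_cwin(&s, 10, 1, 1);                 /* a retransmission halves it */
    printf("after retrans: win=%u ssthresh=%u\n", s.window, s.ssthresh);
    return 0;
}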
+
+static void tipc_link_advance_backlog(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
{
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ struct sk_buff_head *txq = &l->transmq;
struct sk_buff *skb, *_skb;
- struct tipc_msg *hdr;
- u16 seqno = l->snd_nxt;
u16 ack = l->rcv_nxt - 1;
- u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+ struct tipc_msg *hdr;
+ u16 cwin = l->window;
+ u32 imp;
- while (skb_queue_len(&l->transmq) < l->window) {
+ while (skb_queue_len(txq) < cwin) {
skb = skb_peek(&l->backlogq);
if (!skb)
break;
@@ -959,8 +1164,13 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
break;
__skb_dequeue(&l->backlogq);
hdr = buf_msg(skb);
- l->backlog[msg_importance(hdr)].len--;
+ imp = msg_importance(hdr);
+ l->backlog[imp].len--;
+ if (unlikely(skb == l->backlog[imp].target_bskb))
+ l->backlog[imp].target_bskb = NULL;
__skb_queue_tail(&l->transmq, skb);
+ tipc_link_set_skb_retransmit_time(skb, l);
+
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
msg_set_seqno(hdr, seqno);
@@ -973,59 +1183,57 @@ void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
l->snd_nxt = seqno;
}
-static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
-{
- struct tipc_msg *hdr = buf_msg(skb);
-
- pr_warn("Retransmission failure on link <%s>\n", l->name);
- link_print(l, "Resetting link ");
- pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
- msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
- pr_info("sqno %u, prev: %x, src: %x\n",
- msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
-}
-
-int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
- struct sk_buff_head *xmitq)
+/**
+ * link_retransmit_failure() - Detect repeated retransmit failures
+ * @l: tipc link sender
+ * @r: tipc link receiver (= l in case of unicast)
+ * @rc: returned code
+ *
+ * Return: true if repeated retransmit failures have occurred, otherwise
+ * false
+ */
+static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
+ int *rc)
{
- struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+ struct sk_buff *skb = skb_peek(&l->transmq);
struct tipc_msg *hdr;
- u16 ack = l->rcv_nxt - 1;
- u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
if (!skb)
- return 0;
+ return false;
- /* Detect repeated retransmit failures on same packet */
- if (likely(l->last_retransm != buf_seqno(skb))) {
- l->last_retransm = buf_seqno(skb);
- l->stale_count = 1;
- } else if (++l->stale_count > 100) {
- link_retransmit_failure(l, skb);
- return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- }
+ if (!TIPC_SKB_CB(skb)->retr_cnt)
+ return false;
- /* Move forward to where retransmission should start */
- skb_queue_walk(&l->transmq, skb) {
- if (!less(buf_seqno(skb), from))
- break;
- }
+ if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
+ msecs_to_jiffies(r->tolerance * 10)))
+ return false;
- skb_queue_walk_from(&l->transmq, skb) {
- if (more(buf_seqno(skb), to))
- break;
- hdr = buf_msg(skb);
- _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
- if (!_skb)
- return 0;
- hdr = buf_msg(_skb);
- msg_set_ack(hdr, ack);
- msg_set_bcast_ack(hdr, bc_ack);
- _skb->priority = TC_PRIO_CONTROL;
- __skb_queue_tail(xmitq, _skb);
- l->stats.retransmitted++;
+ hdr = buf_msg(skb);
+ if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
+ return false;
+
+ pr_warn("Retransmission failure on link <%s>\n", l->name);
+ link_print(l, "State of link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+ pr_info("sqno %u, prev: %x, dest: %x\n",
+ msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
+ pr_info("retr_stamp %d, retr_cnt %d\n",
+ jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
+ TIPC_SKB_CB(skb)->retr_cnt);
+
+ trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+ trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+
+ if (link_is_bc_sndlink(l)) {
+ r->state = LINK_RESET;
+ *rc |= TIPC_LINK_DOWN_EVT;
+ } else {
+ *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
- return 0;
+
+ return true;
}
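
Failure detection is now time-based: instead of counting 100 stale retransmit attempts, the link gives up when the head-of-queue packet has been retransmitted at least once and its first retransmission is older than ten times the link tolerance. A small sketch of that predicate in plain milliseconds (the kernel works in jiffies via time_after(); the helper name here is hypothetical):

#include <stdio.h>
#include <stdbool.h>

/* True when the oldest unacked packet has been retransmitted at least
 * once and its first retransmission is older than 10 * tolerance.
 */
static bool retransmit_failed(unsigned long now_ms,
                              unsigned long first_retr_ms,
                              unsigned int retr_cnt,
                              unsigned int tolerance_ms)
{
    if (!retr_cnt)
        return false;            /* never retransmitted yet */
    return now_ms - first_retr_ms > 10UL * tolerance_ms;
}

int main(void)
{
    /* With the default 1500 ms tolerance, failure needs ~15 s of retries */
    printf("%d\n", retransmit_failed(16000, 0, 3, 1500)); /* 1 */
    printf("%d\n", retransmit_failed(14000, 0, 3, 1500)); /* 0 */
    printf("%d\n", retransmit_failed(16000, 0, 0, 1500)); /* 0 */
    return 0;
}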
/* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1036,6 +1244,7 @@ int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
+ struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
struct tipc_msg *hdr = buf_msg(skb);
switch (msg_user(hdr)) {
@@ -1043,13 +1252,17 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
- if (unlikely(msg_type(hdr) == TIPC_MCAST_MSG)) {
- skb_queue_tail(l->bc_rcvlink->inputq, skb);
+ if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
+ skb_queue_tail(mc_inputq, skb);
return true;
}
+ fallthrough;
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
+ case GROUP_PROTOCOL:
+ skb_queue_tail(mc_inputq, skb);
+ return true;
case NAME_DISTRIBUTOR:
l->bc_rcvlink->state = LINK_ESTABLISHED;
skb_queue_tail(l->namedq, skb);
@@ -1059,11 +1272,20 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
return false;
+#ifdef CONFIG_TIPC_CRYPTO
+ case MSG_CRYPTO:
+ if (sysctl_tipc_key_exchange_enabled &&
+ TIPC_SKB_CB(skb)->decrypted) {
+ tipc_crypto_msg_rcv(l->net, skb);
+ return true;
+ }
+ fallthrough;
+#endif
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
- return false;
- };
+ return true;
+ }
}
/* tipc_link_input - process packet that has passed link protocol check
@@ -1071,34 +1293,14 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
* Consumes buffer
*/
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
- struct sk_buff_head *inputq)
+ struct sk_buff_head *inputq,
+ struct sk_buff **reasm_skb)
{
struct tipc_msg *hdr = buf_msg(skb);
- struct sk_buff **reasm_skb = &l->reasm_buf;
struct sk_buff *iskb;
struct sk_buff_head tmpq;
int usr = msg_user(hdr);
- int rc = 0;
int pos = 0;
- int ipos = 0;
-
- if (unlikely(usr == TUNNEL_PROTOCOL)) {
- if (msg_type(hdr) == SYNCH_MSG) {
- __skb_queue_purge(&l->deferdq);
- goto drop;
- }
- if (!tipc_msg_extract(skb, &iskb, &ipos))
- return rc;
- kfree_skb(skb);
- skb = iskb;
- hdr = buf_msg(skb);
- if (less(msg_seqno(hdr), l->drop_point))
- goto drop;
- if (tipc_data_input(l, skb, inputq))
- return rc;
- usr = msg_user(hdr);
- reasm_skb = &l->failover_reasm_skb;
- }
if (usr == MSG_BUNDLER) {
skb_queue_head_init(&tmpq);
@@ -1123,24 +1325,359 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
tipc_bcast_unlock(l->net);
}
-drop:
+
kfree_skb(skb);
return 0;
}
-static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
+ * inner message along with the ones in the old link's
+ * deferdq
+ * @l: tunnel link
+ * @skb: TUNNEL_PROTOCOL message
+ * @inputq: queue to put messages ready for delivery
+ */
+static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
+ struct sk_buff_head *inputq)
{
- bool released = false;
- struct sk_buff *skb, *tmp;
+ struct sk_buff **reasm_skb = &l->failover_reasm_skb;
+ struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
+ struct sk_buff_head *fdefq = &l->failover_deferdq;
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct sk_buff *iskb;
+ int ipos = 0;
+ int rc = 0;
+ u16 seqno;
- skb_queue_walk_safe(&l->transmq, skb, tmp) {
- if (more(buf_seqno(skb), acked))
- break;
- __skb_unlink(skb, &l->transmq);
+ if (msg_type(hdr) == SYNCH_MSG) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* Not a fragment? */
+ if (likely(!msg_nof_fragms(hdr))) {
+ if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
+ pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
+ skb_queue_len(fdefq));
+ return 0;
+ }
kfree_skb(skb);
- released = true;
+ } else {
+ /* Set fragment type for buf_append */
+ if (msg_fragm_no(hdr) == 1)
+ msg_set_type(hdr, FIRST_FRAGMENT);
+ else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
+ msg_set_type(hdr, FRAGMENT);
+ else
+ msg_set_type(hdr, LAST_FRAGMENT);
+
+ if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
+ /* Successful but incomplete reassembly? */
+ if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
+ return 0;
+ pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
+ return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+ }
+ iskb = skb;
}
- return released;
+
+ do {
+ seqno = buf_seqno(iskb);
+ if (unlikely(less(seqno, l->drop_point))) {
+ kfree_skb(iskb);
+ continue;
+ }
+ if (unlikely(seqno != l->drop_point)) {
+ __tipc_skb_queue_sorted(fdefq, seqno, iskb);
+ continue;
+ }
+
+ l->drop_point++;
+ if (!tipc_data_input(l, iskb, inputq))
+ rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
+ if (unlikely(rc))
+ break;
+ } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
+
+ return rc;
+}
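
Since a tunneled fragment carries only a (fragm_no, nof_fragms) pair, the receive path must reconstruct the FIRST/middle/LAST fragment type that tipc_buf_append() expects. A trivial sketch of that mapping (hypothetical standalone helper, not the kernel's msg_set_type() path):

#include <stdio.h>

enum { FIRST_FRAGMENT, FRAGMENT, LAST_FRAGMENT };

/* Recover the fragment type the reassembler expects from the
 * (number, total) pair carried by the tunnel header.
 */
static int fragment_type(unsigned int fragm_no, unsigned int nof_fragms)
{
    if (fragm_no == 1)
        return FIRST_FRAGMENT;
    if (fragm_no < nof_fragms)
        return FRAGMENT;
    return LAST_FRAGMENT;
}

int main(void)
{
    unsigned int i, total = 3;

    for (i = 1; i <= total; i++)
        printf("fragment %u/%u -> type %d\n", i, total,
               fragment_type(i, total));
    return 0;
}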
+
+/**
+ * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
+ * @ga: returned pointer to the Gap ACK blocks if any
+ * @l: the tipc link
+ * @hdr: the PROTOCOL/STATE_MSG header
+ * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
+ *
+ * Return: the total Gap ACK blocks size
+ */
+u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ struct tipc_msg *hdr, bool uc)
+{
+ struct tipc_gap_ack_blks *p;
+ u16 sz = 0;
+
+ /* Does peer support the Gap ACK blocks feature? */
+ if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
+ p = (struct tipc_gap_ack_blks *)msg_data(hdr);
+ sz = ntohs(p->len);
+ /* Sanity check */
+ if (sz == struct_size(p, gacks, size_add(p->ugack_cnt, p->bgack_cnt))) {
+ /* Good, check if the desired type exists */
+ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
+ goto ok;
+ /* Backward compatible: peer might support uc blocks only, not bc */
+ } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
+ if (p->ugack_cnt) {
+ p->bgack_cnt = 0;
+ goto ok;
+ }
+ }
+ }
+ /* Other cases: ignore! */
+ p = NULL;
+
+ok:
+ *ga = p;
+ return sz;
+}
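
The parser above only trusts the advertised blocks when the length field matches the struct size implied by the counts, with a backward-compatible fallback for peers that send unicast blocks only. A userspace sketch of the same sanity check; the wire struct is simplified (host byte order, no flexible array) and hypothetical:

#include <stdio.h>
#include <stdint.h>

struct gap_ack { uint16_t ack, gap; };

struct gap_ack_hdr {
    uint16_t len;               /* total size in bytes (host order here) */
    uint8_t  ugack_cnt;         /* # of unicast blocks */
    uint8_t  bgack_cnt;         /* # of broadcast blocks, laid out first */
};

#define BLKS_SIZE(n) (sizeof(struct gap_ack_hdr) + (n) * sizeof(struct gap_ack))

/* Return the block count of the desired type, or -1 if malformed */
static int validate_blks(const struct gap_ack_hdr *p, int want_uc)
{
    size_t both = BLKS_SIZE((size_t)p->ugack_cnt + p->bgack_cnt);
    size_t uc_only = BLKS_SIZE(p->ugack_cnt);

    if (p->len == both)               /* peer sent bc + uc blocks */
        return want_uc ? p->ugack_cnt : p->bgack_cnt;
    if (want_uc && p->len == uc_only) /* old peer: uc blocks only */
        return p->ugack_cnt;
    return -1;                        /* corrupted or unknown layout */
}

int main(void)
{
    struct gap_ack_hdr h = {
        .len = BLKS_SIZE(3), .ugack_cnt = 1, .bgack_cnt = 2,
    };

    printf("uc blocks: %d\n", validate_blks(&h, 1)); /* 1 */
    printf("bc blocks: %d\n", validate_blks(&h, 0)); /* 2 */
    return 0;
}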
+
+static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
+ struct tipc_link *l, u8 start_index)
+{
+ struct tipc_gap_ack *gacks = &ga->gacks[start_index];
+ struct sk_buff *skb = skb_peek(&l->deferdq);
+ u16 expect, seqno = 0;
+ u8 n = 0;
+
+ if (!skb)
+ return 0;
+
+ expect = buf_seqno(skb);
+ skb_queue_walk(&l->deferdq, skb) {
+ seqno = buf_seqno(skb);
+ if (unlikely(more(seqno, expect))) {
+ gacks[n].ack = htons(expect - 1);
+ gacks[n].gap = htons(seqno - expect);
+ if (++n >= MAX_GAP_ACK_BLKS / 2) {
+ pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
+ l->name, n,
+ skb_queue_len(&l->deferdq));
+ return n;
+ }
+ } else if (unlikely(less(seqno, expect))) {
+ pr_warn("Unexpected skb in deferdq!\n");
+ continue;
+ }
+ expect = seqno + 1;
+ }
+
+ /* last block */
+ gacks[n].ack = htons(seqno);
+ gacks[n].gap = 0;
+ n++;
+ return n;
+}
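
Each Gap ACK block encodes one hole in the deferred queue: "I hold everything up to ack, then miss gap packets". A sketch of the same scan over a plain sorted array of deferred sequence numbers (hypothetical harness, ignoring u16 wrap-around for brevity):

#include <stdio.h>
#include <stdint.h>

struct blk { uint16_t ack, gap; };

/* deferred[]: seqnos held in the deferred queue, strictly increasing */
static int build_blocks(const uint16_t *deferred, int n, struct blk *out)
{
    uint16_t expect;
    int i, cnt = 0;

    if (!n)
        return 0;
    expect = deferred[0];
    for (i = 0; i < n; i++) {
        if (deferred[i] != expect) {          /* hole before this packet */
            out[cnt].ack = expect - 1;        /* last seqno in hand */
            out[cnt].gap = deferred[i] - expect;
            cnt++;
        }
        expect = deferred[i] + 1;
    }
    out[cnt].ack = expect - 1;                /* final block, gap 0 */
    out[cnt].gap = 0;
    return cnt + 1;
}

int main(void)
{
    uint16_t defq[] = { 10, 11, 14, 15, 20 }; /* missing 12-13 and 16-19 */
    struct blk b[8];
    int n = build_blocks(defq, 5, b), i;

    for (i = 0; i < n; i++)
        printf("ack=%u gap=%u\n", b[i].ack, b[i].gap);
    /* ack=11 gap=2, ack=15 gap=4, ack=20 gap=0 */
    return 0;
}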
+
+/* tipc_build_gap_ack_blks - build Gap ACK blocks
+ * @l: tipc unicast link
+ * @hdr: the tipc message buffer to store the Gap ACK blocks after built
+ *
+ * The function builds Gap ACK blocks for both the unicast and broadcast
+ * receiver links of a given peer; the resulting buffer follows the network
+ * data format of the struct tipc_gap_ack_blks definition.
+ *
+ * Return: the actual allocated memory size
+ */
+static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ struct tipc_link *bcl = l->bc_rcvlink;
+ struct tipc_gap_ack_blks *ga;
+ u16 len;
+
+ ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
+
+ /* Start with broadcast link first */
+ tipc_bcast_lock(bcl->net);
+ msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
+ msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
+ ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
+ tipc_bcast_unlock(bcl->net);
+
+ /* Now for the unicast link, but only in case of an explicit NACK */
+ ga->ugack_cnt = (msg_seq_gap(hdr)) ?
+ __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
+
+ /* Total len */
+ len = struct_size(ga, gacks, size_add(ga->bgack_cnt, ga->ugack_cnt));
+ ga->len = htons(len);
+ return len;
+}
+
+/* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
+ * acked packets, also retransmitting if gaps
+ * are found
+ * @l: tipc link with transmq queue to be advanced
+ * @r: tipc link "receiver" i.e. in case of broadcast (= "l" if unicast)
+ * @acked: seqno of last packet acked by peer without any gaps before
+ * @gap: # of gap packets
+ * @ga: buffer pointer to Gap ACK blocks from peer
+ * @xmitq: queue for accumulating the retransmitted packets if any
+ * @retransmitted: returned boolean, true if a retransmission was actually issued
+ * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT if repeated retransmit failures
+ * have occurred (unlikely case)
+ *
+ * Return: the number of packets released from the link transmq
+ */
+static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
+ u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ bool *retransmitted, int *rc)
+{
+ struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
+ struct tipc_gap_ack *gacks = NULL;
+ struct sk_buff *skb, *_skb, *tmp;
+ struct tipc_msg *hdr;
+ u32 qlen = skb_queue_len(&l->transmq);
+ u16 nacked = acked, ngap = gap, gack_cnt = 0;
+ u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno, n = 0;
+ u16 end = r->acked, start = end, offset = r->last_gap;
+ u16 si = (last_ga) ? last_ga->start_index : 0;
+ bool is_uc = !link_is_bc_sndlink(l);
+ bool bc_has_acked = false;
+
+ trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
+
+ /* Determine Gap ACK blocks if any for the particular link */
+ if (ga && is_uc) {
+ /* Get the Gap ACKs, uc part */
+ gack_cnt = ga->ugack_cnt;
+ gacks = &ga->gacks[ga->bgack_cnt];
+ } else if (ga) {
+ /* Copy the Gap ACKs, bc part, for later renewal if needed */
+ this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
+ GFP_ATOMIC);
+ if (likely(this_ga)) {
+ this_ga->start_index = 0;
+ /* Start with the bc Gap ACKs */
+ gack_cnt = this_ga->bgack_cnt;
+ gacks = &this_ga->gacks[0];
+ } else {
+ /* We may get into trouble here; simply ignore the blocks */
+ pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
+ }
+ }
+
+ /* Advance the link transmq */
+ skb_queue_walk_safe(&l->transmq, skb, tmp) {
+ seqno = buf_seqno(skb);
+
+next_gap_ack:
+ if (less_eq(seqno, nacked)) {
+ if (is_uc)
+ goto release;
+ /* Skip packets peer has already acked */
+ if (!more(seqno, r->acked))
+ continue;
+ /* Get the next of last Gap ACK blocks */
+ while (more(seqno, end)) {
+ if (!last_ga || si >= last_ga->bgack_cnt)
+ break;
+ start = end + offset + 1;
+ end = ntohs(last_ga->gacks[si].ack);
+ offset = ntohs(last_ga->gacks[si].gap);
+ si++;
+ WARN_ONCE(more(start, end) ||
+ (!offset &&
+ si < last_ga->bgack_cnt) ||
+ si > MAX_GAP_ACK_BLKS,
+ "Corrupted Gap ACK: %d %d %d %d %d\n",
+ start, end, offset, si,
+ last_ga->bgack_cnt);
+ }
+ /* Check against the last Gap ACK block */
+ if (tipc_in_range(seqno, start, end))
+ continue;
+ /* Update/release the packet peer is acking */
+ bc_has_acked = true;
+ if (--TIPC_SKB_CB(skb)->ackers)
+ continue;
+release:
+ /* release skb */
+ __skb_unlink(skb, &l->transmq);
+ kfree_skb(skb);
+ } else if (less_eq(seqno, nacked + ngap)) {
+ /* First gap: check if repeated retrans failures? */
+ if (unlikely(seqno == acked + 1 &&
+ link_retransmit_failure(l, r, rc))) {
+ /* Ignore these bc Gap ACKs, if any */
+ kfree(this_ga);
+ this_ga = NULL;
+ break;
+ }
+ /* retransmit skb if unrestricted */
+ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+ continue;
+ tipc_link_set_skb_retransmit_time(skb, l);
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb)
+ continue;
+ hdr = buf_msg(_skb);
+ msg_set_ack(hdr, ack);
+ msg_set_bcast_ack(hdr, bc_ack);
+ _skb->priority = TC_PRIO_CONTROL;
+ __skb_queue_tail(xmitq, _skb);
+ l->stats.retransmitted++;
+ if (!is_uc)
+ r->stats.retransmitted++;
+ *retransmitted = true;
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
+ } else {
+ /* retry with Gap ACK blocks if any */
+ if (n >= gack_cnt)
+ break;
+ nacked = ntohs(gacks[n].ack);
+ ngap = ntohs(gacks[n].gap);
+ n++;
+ goto next_gap_ack;
+ }
+ }
+
+ /* Renew last Gap ACK blocks for bc if needed */
+ if (bc_has_acked) {
+ if (this_ga) {
+ kfree(last_ga);
+ r->last_ga = this_ga;
+ r->last_gap = gap;
+ } else if (last_ga) {
+ if (less(acked, start)) {
+ si--;
+ offset = start - acked - 1;
+ } else if (less(acked, end)) {
+ acked = end;
+ }
+ if (si < last_ga->bgack_cnt) {
+ last_ga->start_index = si;
+ r->last_gap = offset;
+ } else {
+ kfree(last_ga);
+ r->last_ga = NULL;
+ r->last_gap = 0;
+ }
+ } else {
+ r->last_gap = 0;
+ }
+ r->acked = acked;
+ } else {
+ kfree(this_ga);
+ }
+
+ return qlen - skb_queue_len(&l->transmq);
}
/* tipc_link_build_state_msg: prepare link state message for transmission
@@ -1163,11 +1700,10 @@ int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
l->snd_nxt = l->rcv_nxt;
return TIPC_LINK_SND_STATE;
}
-
/* Unicast ACK */
l->rcv_unacked = 0;
l->stats.sent_acks++;
- tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
return 0;
}
@@ -1181,7 +1717,7 @@ void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
if (l->state == LINK_ESTABLISHING)
mtyp = ACTIVATE_MSG;
- tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
+ tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
/* Inform peer that this endpoint is going down if applicable */
skb = skb_peek_tail(xmitq);
@@ -1197,6 +1733,8 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
struct sk_buff_head *xmitq)
{
u32 def_cnt = ++l->stats.deferred_recv;
+ struct sk_buff_head *dfq = &l->deferdq;
+ u32 defq_len = skb_queue_len(dfq);
int match1, match2;
if (link_is_bc_rcvlink(l)) {
@@ -1207,8 +1745,12 @@ static int tipc_link_build_nack_msg(struct tipc_link *l,
return 0;
}
- if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
- tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
+ if (defq_len >= 3 && !((defq_len - 3) % 16)) {
+ u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
+
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
+ rcvgap, 0, 0, xmitq);
+ }
return 0;
}
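
The new trigger sends a NACK as soon as the deferred queue reaches three packets and then once per sixteen further deferrals (lengths 3, 19, 35, ...), instead of on the first deferral plus every TIPC_NACK_INTV deferrals. A one-liner worth sanity-checking, in a hypothetical harness:

#include <stdio.h>

static int should_nack(unsigned int defq_len)
{
    return defq_len >= 3 && !((defq_len - 3) % 16);
}

int main(void)
{
    unsigned int len;

    for (len = 0; len <= 40; len++)
        if (should_nack(len))
            printf("NACK at deferdq len %u\n", len); /* 3, 19, 35 */
    return 0;
}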
@@ -1221,45 +1763,44 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct sk_buff_head *defq = &l->deferdq;
- struct tipc_msg *hdr;
+ struct tipc_msg *hdr = buf_msg(skb);
u16 seqno, rcv_nxt, win_lim;
+ int released = 0;
int rc = 0;
+ /* Verify and update link state */
+ if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+ return tipc_link_proto_rcv(l, skb, xmitq);
+
+ /* Don't send probe at next timeout expiration */
+ l->silent_intv_cnt = 0;
+
do {
hdr = buf_msg(skb);
seqno = msg_seqno(hdr);
rcv_nxt = l->rcv_nxt;
win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
- /* Verify and update link state */
- if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
- return tipc_link_proto_rcv(l, skb, xmitq);
-
- if (unlikely(!link_is_up(l))) {
+ if (unlikely(!tipc_link_is_up(l))) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
- goto drop;
+ kfree_skb(skb);
+ break;
}
- /* Don't send probe at next timeout expiration */
- l->silent_intv_cnt = 0;
-
/* Drop if outside receive window */
if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
l->stats.duplicates++;
- goto drop;
- }
-
- /* Forward queues and wake up waiting users */
- if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
- tipc_link_advance_backlog(l, xmitq);
- if (unlikely(!skb_queue_empty(&l->wakeupq)))
- link_prepare_wakeup(l);
+ kfree_skb(skb);
+ break;
}
+ released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
+ NULL, NULL, NULL, NULL);
/* Defer delivery if sequence gap */
if (unlikely(seqno != rcv_nxt)) {
- __tipc_skb_queue_sorted(defq, seqno, skb);
+ if (!__tipc_skb_queue_sorted(defq, seqno, skb))
+ l->stats.duplicates++;
rc |= tipc_link_build_nack_msg(l, xmitq);
break;
}
@@ -1267,30 +1808,39 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
/* Deliver packet */
l->rcv_nxt++;
l->stats.recv_pkts++;
- if (!tipc_data_input(l, skb, l->inputq))
- rc |= tipc_link_input(l, skb, l->inputq);
+
+ if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
+ rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
+ else if (!tipc_data_input(l, skb, l->inputq))
+ rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
rc |= tipc_link_build_state_msg(l, xmitq);
if (unlikely(rc & ~TIPC_LINK_SND_STATE))
break;
- } while ((skb = __skb_dequeue(defq)));
+ } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
- return rc;
-drop:
- kfree_skb(skb);
+ /* Forward queues and wake up waiting users */
+ if (released) {
+ tipc_link_update_cwin(l, released, 0);
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+ }
return rc;
}
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
- u16 rcvgap, int tolerance, int priority,
+ bool probe_reply, u16 rcvgap,
+ int tolerance, int priority,
struct sk_buff_head *xmitq)
{
+ struct tipc_mon_state *mstate = &l->mon_state;
+ struct sk_buff_head *dfq = &l->deferdq;
struct tipc_link *bcl = l->bc_rcvlink;
- struct sk_buff *skb;
struct tipc_msg *hdr;
- struct sk_buff_head *dfq = &l->deferdq;
- bool node_up = link_is_up(bcl);
- struct tipc_mon_state *mstate = &l->mon_state;
+ struct sk_buff *skb;
+ bool node_up = tipc_link_is_up(bcl);
+ u16 glen = 0, bc_rcvgap = 0;
int dlen = 0;
void *data;
@@ -1301,12 +1851,12 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
return;
- if (!skb_queue_empty(dfq))
+ if ((probe || probe_reply) && !skb_queue_empty(dfq))
rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
- tipc_max_domain_size, l->addr,
- tipc_own_addr(l->net), 0, 0, 0);
+ tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
+ l->addr, tipc_own_addr(l->net), 0, 0, 0);
if (!skb)
return;
@@ -1327,16 +1877,26 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
if (mtyp == STATE_MSG) {
+ if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
+ msg_set_seqno(hdr, l->snd_nxt_state++);
msg_set_seq_gap(hdr, rcvgap);
- msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
+ bc_rcvgap = link_bc_rcv_gap(bcl);
+ msg_set_bc_gap(hdr, bc_rcvgap);
msg_set_probe(hdr, probe);
- tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
- msg_set_size(hdr, INT_H_SIZE + dlen);
- skb_trim(skb, INT_H_SIZE + dlen);
+ msg_set_is_keepalive(hdr, probe || probe_reply);
+ if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
+ glen = tipc_build_gap_ack_blks(l, hdr);
+ tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
+ msg_set_size(hdr, INT_H_SIZE + glen + dlen);
+ skb_trim(skb, INT_H_SIZE + glen + dlen);
l->stats.sent_states++;
l->rcv_unacked = 0;
} else {
/* RESET_MSG or ACTIVATE_MSG */
+ if (mtyp == ACTIVATE_MSG) {
+ msg_set_dest_session_valid(hdr, 1);
+ msg_set_dest_session(hdr, l->peer_session);
+ }
msg_set_max_pkt(hdr, l->advertised_mtu);
strcpy(data, l->if_name);
msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
@@ -1346,8 +1906,41 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
l->stats.sent_probes++;
if (rcvgap)
l->stats.sent_nacks++;
+ if (bc_rcvgap)
+ bcl->stats.sent_nacks++;
skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, skb);
+ trace_tipc_proto_build(skb, false, l->name);
+}
+
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 onode = tipc_own_addr(l->net);
+ struct tipc_msg *hdr, *ihdr;
+ struct sk_buff_head tnlq;
+ struct sk_buff *skb;
+ u32 dnode = l->addr;
+
+ __skb_queue_head_init(&tnlq);
+ skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+ INT_H_SIZE, BASIC_H_SIZE,
+ dnode, onode, 0, 0, 0);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, 1);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+ ihdr = (struct tipc_msg *)msg_data(hdr);
+ tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, dnode);
+ msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, xmitq);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
@@ -1359,15 +1952,43 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
struct sk_buff *skb, *tnlskb;
struct tipc_msg *hdr, tnlhdr;
struct sk_buff_head *queue = &l->transmq;
- struct sk_buff_head tmpxq, tnlq;
+ struct sk_buff_head tmpxq, tnlq, frags;
u16 pktlen, pktcnt, seqno = l->snd_nxt;
+ bool pktcnt_need_update = false;
+ u16 syncpt;
+ int rc;
if (!tnl)
return;
- skb_queue_head_init(&tnlq);
- skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&tnlq);
+ /* Link Synching:
+ * From now on, send only a single ("dummy") SYNCH message
+ * to peer. The SYNCH message does not contain any data, just
+ * a header conveying the synch point to the peer.
+ */
+ if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
+ tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
+ INT_H_SIZE, 0, l->addr,
+ tipc_own_addr(l->net),
+ 0, 0, 0);
+ if (!tnlskb) {
+ pr_warn("%sunable to create dummy SYNCH_MSG\n",
+ link_co_err);
+ return;
+ }
+ hdr = buf_msg(tnlskb);
+ syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
+ msg_set_syncpt(hdr, syncpt);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+ __skb_queue_tail(&tnlq, tnlskb);
+ tipc_link_xmit(tnl, &tnlq, xmitq);
+ return;
+ }
+
+ __skb_queue_head_init(&tmpxq);
+ __skb_queue_head_init(&frags);
/* At least one packet required for safe algorithm => add dummy */
skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
@@ -1376,14 +1997,18 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
pr_warn("%sunable to create tunnel packet\n", link_co_err);
return;
}
- skb_queue_tail(&tnlq, skb);
+ __skb_queue_tail(&tnlq, skb);
tipc_link_xmit(l, &tnlq, &tmpxq);
__skb_queue_purge(&tmpxq);
/* Initialize reusable tunnel packet header */
tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
mtyp, INT_H_SIZE, l->addr);
- pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+ if (mtyp == SYNCH_MSG)
+ pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
+ else
+ pktcnt = skb_queue_len(&l->transmq);
+ pktcnt += skb_queue_len(&l->backlogq);
msg_set_msgcnt(&tnlhdr, pktcnt);
msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
@@ -1393,6 +2018,39 @@ tnl:
if (queue == &l->backlogq)
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
+
+ /* Tunnel link MTU is not large enough? This could be
+ * due to:
+ * 1) Link MTU has just changed or been set differently;
+ * 2) FAILOVER on top of a SYNCH message
+ *
+ * The 2nd case should not happen if the peer supports
+ * TIPC_TUNNEL_ENHANCED
+ */
+ if (pktlen > tnl->mtu - INT_H_SIZE) {
+ if (mtyp == FAILOVER_MSG &&
+ (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
+ rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
+ &frags);
+ if (rc) {
+ pr_warn("%sunable to frag msg: rc %d\n",
+ link_co_err, rc);
+ return;
+ }
+ pktcnt += skb_queue_len(&frags) - 1;
+ pktcnt_need_update = true;
+ skb_queue_splice_tail_init(&frags, &tnlq);
+ continue;
+ }
+ /* Unluckily, the peer doesn't support TIPC_TUNNEL_ENHANCED
+ * => just warn and return!
+ */
+ pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
+ link_co_err, msg_user(hdr),
+ msg_type(hdr), msg_size(hdr));
+ return;
+ }
+
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
@@ -1408,12 +2066,103 @@ tnl:
goto tnl;
}
+ if (pktcnt_need_update)
+ skb_queue_walk(&tnlq, skb) {
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, pktcnt);
+ }
+
tipc_link_xmit(tnl, &tnlq, xmitq);
if (mtyp == FAILOVER_MSG) {
+ struct sk_buff_head *fdefq = &tnl->failover_deferdq;
+
tnl->drop_point = l->rcv_nxt;
tnl->failover_reasm_skb = l->reasm_buf;
l->reasm_buf = NULL;
+
+ /* Failover the link's deferdq */
+ if (unlikely(!skb_queue_empty(fdefq))) {
+ pr_warn("Link failover deferdq not empty: %d!\n",
+ skb_queue_len(fdefq));
+ __skb_queue_purge(fdefq);
+ }
+ skb_queue_splice_init(&l->deferdq, fdefq);
+ }
+}
+
+/**
+ * tipc_link_failover_prepare() - prepare tnl for link failover
+ *
+ * This is a special version of its precursor, tipc_link_tnl_prepare();
+ * see tipc_node_link_failover() for details.
+ *
+ * @l: failover link
+ * @tnl: tunnel link
+ * @xmitq: queue for messages to be transmitted
+ */
+void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff_head *fdefq = &tnl->failover_deferdq;
+
+ tipc_link_create_dummy_tnl_msg(tnl, xmitq);
+
+ /* This failover link endpoint was never established before,
+ * so it has not received anything from the peer.
+ * Otherwise, it must be a normal failover situation or the
+ * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
+ * would have to start over from scratch instead.
+ */
+ tnl->drop_point = 1;
+ tnl->failover_reasm_skb = NULL;
+
+ /* Initiate the link's failover deferdq */
+ if (unlikely(!skb_queue_empty(fdefq))) {
+ pr_warn("Link failover deferdq not empty: %d!\n",
+ skb_queue_len(fdefq));
+ __skb_queue_purge(fdefq);
+ }
+}
+
+/* tipc_link_validate_msg(): validate message against current link state
+ * Returns true if message should be accepted, otherwise false
+ */
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
+{
+ u16 curr_session = l->peer_session;
+ u16 session = msg_session(hdr);
+ int mtyp = msg_type(hdr);
+
+ if (msg_user(hdr) != LINK_PROTOCOL)
+ return true;
+
+ switch (mtyp) {
+ case RESET_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only RESET with new session number */
+ return more(session, curr_session);
+ case ACTIVATE_MSG:
+ if (!l->in_session)
+ return true;
+ /* Accept only ACTIVATE with new or current session number */
+ return !less(session, curr_session);
+ case STATE_MSG:
+ /* Accept only STATE with current session number */
+ if (!l->in_session)
+ return false;
+ if (session != curr_session)
+ return false;
+ /* Extra sanity check */
+ if (!tipc_link_is_up(l) && msg_ack(hdr))
+ return false;
+ if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
+ return true;
+ /* Accept only STATE with new sequence number */
+ return !less(msg_seqno(hdr), l->rcv_nxt_state);
+ default:
+ return false;
}
}
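
The session and state-sequence comparisons above rely on wrap-around-safe u16 ordering, so e.g. a RESET carrying session 2 is still "newer" than a current session of 65534. A minimal sketch of a simplified comparison in the spirit of the kernel's less()/more() helpers (which differ in form but agree in effect, except at the maximal half-range distance):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Wrap-around-safe u16 ordering: "left comes before right" */
static bool less(uint16_t left, uint16_t right)
{
    return (int16_t)(left - right) < 0;
}

static bool more(uint16_t left, uint16_t right)
{
    return less(right, left);
}

int main(void)
{
    /* RESET: accept only a strictly newer session number */
    printf("%d\n", more(2, 65534));   /* 1: 2 is "after" 65534 */
    printf("%d\n", more(65534, 2));   /* 0 */
    /* STATE: accept only seqno >= rcv_nxt_state */
    printf("%d\n", !less(100, 100));  /* 1 */
    return 0;
}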
@@ -1426,18 +2175,25 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
- u16 rcvgap = 0;
- u16 ack = msg_ack(hdr);
- u16 gap = msg_seq_gap(hdr);
+ struct tipc_gap_ack_blks *ga = NULL;
+ bool reply = msg_probe(hdr), retransmitted = false;
+ u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
+ u16 gap = msg_seq_gap(hdr);
+ u16 ack = msg_ack(hdr);
u16 rcv_nxt = l->rcv_nxt;
- u16 dlen = msg_data_sz(hdr);
+ u16 rcvgap = 0;
int mtyp = msg_type(hdr);
- void *data;
+ int rc = 0, released;
char *if_name;
- int rc = 0;
+ void *data;
+
+ trace_tipc_proto_rcv(skb, false, l->name);
+
+ if (dlen > U16_MAX)
+ goto exit;
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
@@ -1445,62 +2201,88 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (tipc_own_addr(l->net) > msg_prevnode(hdr))
l->net_plane = msg_net_plane(hdr);
- skb_linearize(skb);
+ if (skb_linearize(skb))
+ goto exit;
+
hdr = buf_msg(skb);
data = msg_data(hdr);
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
+ goto exit;
+ }
+
switch (mtyp) {
case RESET_MSG:
-
- /* Ignore duplicate RESET with old session number */
- if ((less_eq(msg_session(hdr), l->peer_session)) &&
- (l->peer_session != ANY_SESSION))
- break;
- /* fall thru' */
-
case ACTIVATE_MSG:
-
+ msg_max = msg_max_pkt(hdr);
+ if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
+ break;
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
break;
if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
break;
- strncpy(if_name, data, TIPC_MAX_IF_NAME);
+ strscpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own priority if peer's priority is higher */
- if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+ if (tipc_in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
- /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
- if (msg_peer_stopping(hdr))
+ /* If peer is going down we want full re-establish cycle */
+ if (msg_peer_stopping(hdr)) {
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- else if ((mtyp == RESET_MSG) || !link_is_up(l))
+ break;
+ }
+
+ /* If this endpoint was re-created while peer was ESTABLISHING
+ * it doesn't know current session number. Force re-synch.
+ */
+ if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
+ l->session != msg_dest_session(hdr)) {
+ if (less(l->session, msg_dest_session(hdr)))
+ l->session = msg_dest_session(hdr) + 1;
+ break;
+ }
+
+ /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+ if (mtyp == RESET_MSG || !tipc_link_is_up(l))
rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
/* ACTIVATE_MSG takes up link if it was already locally reset */
- if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+ if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
l->peer_session = msg_session(hdr);
+ l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
- if (l->mtu > msg_max_pkt(hdr))
- l->mtu = msg_max_pkt(hdr);
+ if (l->mtu > msg_max)
+ l->mtu = msg_max;
break;
case STATE_MSG:
+ /* Validate Gap ACK blocks, drop if invalid */
+ glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
+ if (glen > dlen)
+ break;
+
+ l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (tipc_in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
- in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
+ tipc_in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
l->priority = peers_prio;
rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
}
@@ -1510,29 +2292,33 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
if (msg_probe(hdr))
l->stats.recv_probes++;
- if (!link_is_up(l)) {
+ if (!tipc_link_is_up(l)) {
if (l->state == LINK_ESTABLISHING)
rc = TIPC_LINK_UP_EVT;
break;
}
- tipc_mon_rcv(l->net, data, dlen, l->addr,
+
+ tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
&l->mon_state, l->bearer_id);
/* Send NACK if peer has sent pkts we haven't received yet */
- if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
+ if ((reply || msg_is_keepalive(hdr)) &&
+ more(peers_snd_nxt, rcv_nxt) &&
+ !tipc_link_is_synching(l) &&
+ skb_queue_empty(&l->deferdq))
rcvgap = peers_snd_nxt - l->rcv_nxt;
- if (rcvgap || (msg_probe(hdr)))
- tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
- 0, 0, xmitq);
- tipc_link_release_pkts(l, ack);
-
- /* If NACK, retransmit will now start at right position */
- if (gap) {
- rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
- l->stats.recv_nacks++;
- }
+ if (rcvgap || reply)
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
+ rcvgap, 0, 0, xmitq);
- tipc_link_advance_backlog(l, xmitq);
+ released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
+ &retransmitted, &rc);
+ if (gap)
+ l->stats.recv_nacks++;
+ if (released || retransmitted)
+ tipc_link_update_cwin(l, released, retransmitted);
+ if (released)
+ tipc_link_advance_backlog(l, xmitq);
if (unlikely(!skb_queue_empty(&l->wakeupq)))
link_prepare_wakeup(l);
}
@@ -1593,7 +2379,7 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
int mtyp = msg_type(hdr);
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
- if (link_is_up(l))
+ if (tipc_link_is_up(l))
return;
if (msg_user(hdr) == BCAST_PROTOCOL) {
@@ -1613,75 +2399,31 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
l->rcv_nxt = peers_snd_nxt;
}
-/* link_bc_retr eval()- check if the indicated range can be retransmitted now
- * - Adjust permitted range if there is overlap with previous retransmission
- */
-static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
-{
- unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
-
- if (less(*to, *from))
- return false;
-
- /* New retransmission request */
- if ((elapsed > TIPC_BC_RETR_LIMIT) ||
- less(*to, l->prev_from) || more(*from, l->prev_to)) {
- l->prev_from = *from;
- l->prev_to = *to;
- l->prev_retr = jiffies;
- return true;
- }
-
- /* Inside range of previous retransmit */
- if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
- return false;
-
- /* Fully or partially outside previous range => exclude overlap */
- if (less(*from, l->prev_from)) {
- *to = l->prev_from - 1;
- l->prev_from = *from;
- }
- if (more(*to, l->prev_to)) {
- *from = l->prev_to + 1;
- l->prev_to = *to;
- }
- l->prev_retr = jiffies;
- return true;
-}
-
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
*/
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
struct sk_buff_head *xmitq)
{
- struct tipc_link *snd_l = l->bc_sndlink;
u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
- u16 from = msg_bcast_ack(hdr) + 1;
- u16 to = from + msg_bc_gap(hdr) - 1;
int rc = 0;
- if (!link_is_up(l))
+ if (!tipc_link_is_up(l))
return rc;
if (!msg_peer_node_is_up(hdr))
return rc;
- /* Open when peer ackowledges our bcast init msg (pkt #1) */
+ /* Open when peer acknowledges our bcast init msg (pkt #1) */
if (msg_ack(hdr))
l->bc_peer_is_up = true;
if (!l->bc_peer_is_up)
return rc;
- l->stats.recv_nacks++;
-
/* Ignore if peers_snd_nxt goes beyond receive window */
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
return rc;
- if (link_bc_retr_eval(snd_l, &from, &to))
- rc = tipc_link_retrans(snd_l, from, to, xmitq);
-
l->snd_nxt = peers_snd_nxt;
if (link_bc_rcv_gap(l))
rc |= TIPC_LINK_SND_STATE;
@@ -1716,37 +2458,34 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
return 0;
}
-void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
- struct sk_buff_head *xmitq)
+int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ struct sk_buff_head *retrq)
{
- struct sk_buff *skb, *tmp;
- struct tipc_link *snd_l = l->bc_sndlink;
-
- if (!link_is_up(l) || !l->bc_peer_is_up)
- return;
+ struct tipc_link *l = r->bc_sndlink;
+ bool unused = false;
+ int rc = 0;
- if (!more(acked, l->acked))
- return;
+ if (!tipc_link_is_up(r) || !r->bc_peer_is_up)
+ return 0;
- /* Skip over packets peer has already acked */
- skb_queue_walk(&snd_l->transmq, skb) {
- if (more(buf_seqno(skb), l->acked))
- break;
+ if (gap) {
+ l->stats.recv_nacks++;
+ r->stats.recv_nacks++;
}
- /* Update/release the packets peer is acking now */
- skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
- if (more(buf_seqno(skb), acked))
- break;
- if (!--TIPC_SKB_CB(skb)->ackers) {
- __skb_unlink(skb, &snd_l->transmq);
- kfree_skb(skb);
- }
- }
- l->acked = acked;
- tipc_link_advance_backlog(snd_l, xmitq);
- if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
- link_prepare_wakeup(snd_l);
+ if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
+ return 0;
+
+ trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
+ tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
+
+ tipc_link_advance_backlog(l, xmitq);
+ if (unlikely(!skb_queue_empty(&l->wakeupq)))
+ link_prepare_wakeup(l);
+
+ return rc;
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
@@ -1774,8 +2513,8 @@ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
return 0;
if (dnode == tipc_own_addr(l->net)) {
- tipc_link_bc_ack_rcv(l, acked, xmitq);
- rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
+ rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
+ xmitq);
l->stats.recv_nacks++;
return rc;
}
@@ -1787,20 +2526,23 @@ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
return 0;
}
-void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
- int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
+ int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
- l->window = win;
- l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
- l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
- l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
- l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
+ l->min_win = min_win;
+ l->ssthresh = max_win;
+ l->max_win = max_win;
+ l->window = min_win;
+ l->backlog[TIPC_LOW_IMPORTANCE].limit = min_win * 2;
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = min_win * 4;
+ l->backlog[TIPC_HIGH_IMPORTANCE].limit = min_win * 6;
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}
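
With a variable congestion window, the backlog limits are now anchored to the fixed minimum window rather than the configured one, so they stay stable while the window breathes; system-level messages keep a bulk limit derived from the name-table publication count. A sketch of the resulting table; the MAX_PUBL and ITEM_SIZE values here are illustrative placeholders, not the kernel constants:

#include <stdio.h>

/* Placeholder constants: the kernel derives max_bulk from
 * TIPC_MAX_PUBL / (mtu / ITEM_SIZE); values here are illustrative.
 */
#define MAX_PUBL 65536
#define ITEM_SIZE 24

static void set_queue_limits(unsigned int min_win, unsigned int mtu,
                             unsigned int limit[5])
{
    limit[0] = min_win * 2;                  /* TIPC_LOW_IMPORTANCE */
    limit[1] = min_win * 4;                  /* TIPC_MEDIUM_IMPORTANCE */
    limit[2] = min_win * 6;                  /* TIPC_HIGH_IMPORTANCE */
    limit[3] = min_win * 8;                  /* TIPC_CRITICAL_IMPORTANCE */
    limit[4] = MAX_PUBL / (mtu / ITEM_SIZE); /* TIPC_SYSTEM_IMPORTANCE */
}

int main(void)
{
    unsigned int limit[5];
    int i;

    set_queue_limits(50, 1500, limit);
    for (i = 0; i < 5; i++)
        printf("imp %d: limit %u\n", i, limit[i]);
    return 0;
}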
/**
- * link_reset_stats - reset link statistics
+ * tipc_link_reset_stats - reset link statistics
* @l: pointer to link
*/
void tipc_link_reset_stats(struct tipc_link *l)
@@ -1826,8 +2568,8 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
int err;
- err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
- tipc_nl_prop_policy, NULL);
+ err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
+ tipc_nl_prop_policy, NULL);
if (err)
return err;
@@ -1848,10 +2590,10 @@ int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
}
if (props[TIPC_NLA_PROP_WIN]) {
- u32 win;
+ u32 max_win;
- win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
- if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
+ max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
return -EINVAL;
}
@@ -1906,7 +2648,7 @@ static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
(s->accu_queue_sz / s->queue_sz_counts) : 0}
};
- stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
+ stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
if (!stats)
return -EMSGSIZE;
@@ -1927,25 +2669,24 @@ msg_full:
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_link *link, int nlflags)
{
- int err;
- void *hdr;
+ u32 self = tipc_own_addr(net);
struct nlattr *attrs;
struct nlattr *prop;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ void *hdr;
+ int err;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
nlflags, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
- tipc_cluster_mask(tn->own_addr)))
+ if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
goto attr_msg_full;
@@ -1961,7 +2702,7 @@ int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
goto attr_msg_full;
- prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
@@ -2028,7 +2769,7 @@ static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
};
- nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
+ nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
if (!nest)
return -EMSGSIZE;
@@ -2045,14 +2786,15 @@ msg_full:
return -EMSGSIZE;
}
-int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
+int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
+ struct tipc_link *bcl)
{
int err;
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_link *bcl = tn->bcl;
+ u32 bc_mode = tipc_bcast_get_mode(net);
+ u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
if (!bcl)
return 0;
@@ -2066,7 +2808,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
return -EMSGSIZE;
}
- attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
if (!attrs)
goto msg_full;
@@ -2083,11 +2825,17 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
goto attr_msg_full;
- prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+ prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
if (!prop)
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
+ goto prop_msg_full;
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
goto prop_msg_full;
+ if (bc_mode & BCLINK_MODE_SEL)
+ if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
+ bc_ratio))
+ goto prop_msg_full;
nla_nest_end(msg->skb, prop);
err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
@@ -2115,17 +2863,124 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
- tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
+ if (tipc_link_is_up(l))
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
struct sk_buff_head *xmitq)
{
l->priority = prio;
- tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
+ tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
l->abort_limit = limit;
}
+
+/**
+ * tipc_link_dump - dump TIPC link data
+ * @l: tipc link to be dumped
+ * @dqueues: bitmask selecting which link queues to dump:
+ * - TIPC_DUMP_NONE: don't dump link queues
+ * - TIPC_DUMP_TRANSMQ: dump link transmq queue
+ * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
+ * - TIPC_DUMP_DEFERDQ: dump link deferred queue
+ * - TIPC_DUMP_INPUTQ: dump link input queue
+ * - TIPC_DUMP_WAKEUP: dump link wakeup queue
+ * - TIPC_DUMP_ALL: dump all the link queues above
+ * @buf: returned buffer holding the dump data
+ */
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
+ struct sk_buff_head *list;
+ struct sk_buff *hskb, *tskb;
+ u32 len;
+
+ if (!l) {
+ i += scnprintf(buf, sz, "link data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "link data: %x", l->addr);
+ i += scnprintf(buf + i, sz - i, " %x", l->state);
+ i += scnprintf(buf + i, sz - i, " %u", l->in_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->session);
+ i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
+ i += scnprintf(buf + i, sz - i, " %u", l->acked);
+
+ list = &l->transmq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->deferdq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->backlogq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = l->inputq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ if (dqueues & TIPC_DUMP_TRANSMQ) {
+ i += scnprintf(buf + i, sz - i, "transmq: ");
+ i += tipc_list_dump(&l->transmq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_BACKLOGQ) {
+ i += scnprintf(buf + i, sz - i,
+ "backlogq: <%u %u %u %u %u>, ",
+ l->backlog[TIPC_LOW_IMPORTANCE].len,
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
+ l->backlog[TIPC_HIGH_IMPORTANCE].len,
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
+ i += tipc_list_dump(&l->backlogq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_DEFERDQ) {
+ i += scnprintf(buf + i, sz - i, "deferdq: ");
+ i += tipc_list_dump(&l->deferdq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_INPUTQ) {
+ i += scnprintf(buf + i, sz - i, "inputq: ");
+ i += tipc_list_dump(l->inputq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_WAKEUP) {
+ i += scnprintf(buf + i, sz - i, "wakeup: ");
+ i += tipc_list_dump(&l->wakeupq, false, buf + i);
+ }
+
+ return i;
+}
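A minimal caller sketch for the new dump helper (assumptions: LINK_LMAX from trace.h bounds the dump size, example_dump_link is a hypothetical caller that already holds the lock protecting the link and its queues, and stack-size concerns are ignored for brevity):

    static void example_dump_link(struct tipc_link *l)
    {
    	char buf[LINK_LMAX];
    	int len;

    	/* Header fields plus the transmq and deferdq contents */
    	len = tipc_link_dump(l, TIPC_DUMP_TRANSMQ | TIPC_DUMP_DEFERDQ, buf);
    	pr_info("%.*s", len, buf);
    }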
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d1bd1787a768..d80f5649b395 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -73,21 +73,25 @@ enum {
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
int tolerance, char net_plane, u32 mtu, int priority,
- int window, u32 session, u32 ownnode, u32 peer,
- u16 peer_caps,
+ u32 min_win, u32 max_win, u32 session, u32 ownnode,
+ u32 peer, u8 *peer_id, u16 peer_caps,
struct tipc_link *bc_sndlink,
struct tipc_link *bc_rcvlink,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link **link);
-bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
- int mtu, int window, u16 peer_caps,
+bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
+ int mtu, u32 min_win, u32 max_win, u16 peer_caps,
struct sk_buff_head *inputq,
struct sk_buff_head *namedq,
struct tipc_link *bc_sndlink,
struct tipc_link **link);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
+void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
bool tipc_link_is_up(struct tipc_link *l);
@@ -107,16 +111,20 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
-int tipc_link_window(struct tipc_link *l);
+int tipc_link_min_win(struct tipc_link *l);
+int tipc_link_max_win(struct tipc_link *l);
+void tipc_link_update_caps(struct tipc_link *l, u16 capabilities);
+bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr);
unsigned long tipc_link_tolerance(struct tipc_link *l);
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq);
void tipc_link_set_prio(struct tipc_link *l, u32 prio,
struct sk_buff_head *xmitq);
void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
-void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
+void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win);
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_link *link, int nlflags);
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
@@ -133,13 +141,18 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
int tipc_link_bc_peers(struct tipc_link *l);
void tipc_link_set_mtu(struct tipc_link *l, int mtu);
int tipc_link_mtu(struct tipc_link *l);
-void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
- struct sk_buff_head *xmitq);
-void tipc_link_build_bc_sync_msg(struct tipc_link *l,
- struct sk_buff_head *xmitq);
+int tipc_link_mss(struct tipc_link *l);
+u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
+ struct tipc_msg *hdr, bool uc);
+int tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked, u16 gap,
+ struct tipc_gap_ack_blks *ga,
+ struct sk_buff_head *xmitq,
+ struct sk_buff_head *retrq);
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr);
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
struct sk_buff_head *xmitq);
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
+bool tipc_link_too_silent(struct tipc_link *l);
+struct net *tipc_link_net(struct tipc_link *l);
#endif
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 9e109bb1a207..572b79bf76ce 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -104,11 +104,41 @@ static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)
const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);
+static inline u16 mon_cpu_to_le16(u16 val)
+{
+ return (__force __u16)htons(val);
+}
+
+static inline u32 mon_cpu_to_le32(u32 val)
+{
+ return (__force __u32)htonl(val);
+}
+
+static inline u64 mon_cpu_to_le64(u64 val)
+{
+ return (__force __u64)cpu_to_be64(val);
+}
+
+static inline u16 mon_le16_to_cpu(u16 val)
+{
+ return ntohs((__force __be16)val);
+}
+
+static inline u32 mon_le32_to_cpu(u32 val)
+{
+ return ntohl((__force __be32)val);
+}
+
+static inline u64 mon_le64_to_cpu(u64 val)
+{
+ return be64_to_cpu((__force __be64)val);
+}
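The new wrappers only centralize the __force casts: htons()/htonl()/cpu_to_be64() produce network (big-endian) byte order, so despite the "le" naming the on-wire encoding of the domain record is exactly what the replaced direct htons()/htonl() calls produced. A round-trip sketch (hypothetical self-test, not part of the patch):

    static void mon_endian_roundtrip_check(void)
    {
    	u16 gen = 0x1234;

    	/* converting out and back must be the identity */
    	WARN_ON(mon_le16_to_cpu(mon_cpu_to_le16(gen)) != gen);
    }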
+
/* dom_rec_len(): actual length of domain record for transport
*/
static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
{
- return ((void *)&dom->members - (void *)dom) + (mcnt * sizeof(u32));
+ return (offsetof(struct tipc_mon_domain, members)) + (mcnt * sizeof(u32));
}
/* dom_size() : calculate size of own domain based on number of peers
@@ -119,7 +149,7 @@ static int dom_size(int peers)
while ((i * i) < peers)
i++;
- return i < MAX_MON_DOMAIN ? i : MAX_MON_DOMAIN;
+ return min(i, MAX_MON_DOMAIN);
}
static void map_set(u64 *up_map, int i, unsigned int v)
@@ -130,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
static int map_get(u64 up_map, int i)
{
- return (up_map & (1 << i)) >> i;
+ return (up_map & (1ULL << i)) >> i;
}
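The ULL suffix matters because up_map covers up to 64 domain members: with a plain int constant the shift is performed in 32-bit arithmetic and is undefined for i >= 32. A sketch of the failure mode (illustrative values):

    u64 up_map = 0;

    map_set(&up_map, 40, 1);		/* sets bit 40 of the 64-bit map */
    /* old code: up_map & (1 << 40)   - undefined 32-bit shift      */
    /* new code: up_map & (1ULL << 40) - well defined, reads bit 40 */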
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
@@ -260,16 +290,16 @@ static void mon_update_local_domain(struct tipc_monitor *mon)
diff |= dom->members[i] != peer->addr;
dom->members[i] = peer->addr;
map_set(&dom->up_map, i, peer->is_up);
- cache->members[i] = htonl(peer->addr);
+ cache->members[i] = mon_cpu_to_le32(peer->addr);
}
diff |= dom->up_map != prev_up_map;
if (!diff)
return;
dom->gen = ++mon->dom_gen;
- cache->len = htons(dom->len);
- cache->gen = htons(dom->gen);
- cache->member_cnt = htons(member_cnt);
- cache->up_map = cpu_to_be64(dom->up_map);
+ cache->len = mon_cpu_to_le16(dom->len);
+ cache->gen = mon_cpu_to_le16(dom->gen);
+ cache->member_cnt = mon_cpu_to_le16(member_cnt);
+ cache->up_map = mon_cpu_to_le64(dom->up_map);
mon_apply_domain(mon, self);
}
@@ -322,9 +352,13 @@ static void mon_assign_roles(struct tipc_monitor *mon, struct tipc_peer *head)
void tipc_mon_remove_peer(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *prev, *head;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer)
@@ -407,11 +441,15 @@ exit:
void tipc_mon_peer_down(struct net *net, u32 addr, int bearer_id)
{
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *head;
struct tipc_mon_domain *dom;
int applied;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
peer = get_peer(mon, addr);
if (!peer) {
@@ -447,21 +485,24 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
struct tipc_mon_domain dom_bef;
struct tipc_mon_domain *dom;
struct tipc_peer *peer;
- u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
+ u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
- u16 new_gen = ntohs(arrv_dom->gen);
- u16 acked_gen = ntohs(arrv_dom->ack_gen);
+ u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
+ u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
+ u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
bool probing = state->probing;
int i, applied_bef;
state->probing = false;
/* Sanity check received domain record */
+ if (new_member_cnt > MAX_MON_DOMAIN)
+ return;
if (dlen < dom_rec_len(arrv_dom, 0))
return;
if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
return;
- if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
+ if (dlen < new_dlen || arrv_dlen != new_dlen)
return;
/* Synch generation numbers with peer if link just came up */
@@ -509,9 +550,9 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
dom->len = new_dlen;
dom->gen = new_gen;
dom->member_cnt = new_member_cnt;
- dom->up_map = be64_to_cpu(arrv_dom->up_map);
+ dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
for (i = 0; i < new_member_cnt; i++)
- dom->members[i] = ntohl(arrv_dom->members[i]);
+ dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);
/* Update peers affected by this domain record */
applied_bef = peer->applied;
@@ -530,26 +571,29 @@ void tipc_mon_prep(struct net *net, void *data, int *dlen,
u16 gen = mon->dom_gen;
u16 len;
- if (!tipc_mon_is_active(net, mon))
+ /* Send invalid record if not active */
+ if (!tipc_mon_is_active(net, mon)) {
+ dom->len = 0;
return;
+ }
/* Send only a dummy record with ack if peer has acked our last sent */
if (likely(state->acked_gen == gen)) {
len = dom_rec_len(dom, 0);
*dlen = len;
- dom->len = htons(len);
- dom->gen = htons(gen);
- dom->ack_gen = htons(state->peer_gen);
+ dom->len = mon_cpu_to_le16(len);
+ dom->gen = mon_cpu_to_le16(gen);
+ dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
dom->member_cnt = 0;
return;
}
/* Send the full record */
read_lock_bh(&mon->lock);
- len = ntohs(mon->cache.len);
+ len = mon_le16_to_cpu(mon->cache.len);
*dlen = len;
memcpy(data, &mon->cache, len);
read_unlock_bh(&mon->lock);
- dom->ack_gen = htons(state->peer_gen);
+ dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
}
void tipc_mon_get_state(struct net *net, u32 addr,
@@ -559,6 +603,12 @@ void tipc_mon_get_state(struct net *net, u32 addr,
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
struct tipc_peer *peer;
+ if (!tipc_mon_is_active(net, mon)) {
+ state->probing = false;
+ state->monitoring = true;
+ return;
+ }
+
/* Use cached state if table has not changed */
if (!state->probing &&
(state->list_gen == mon->list_gen) &&
@@ -578,9 +628,9 @@ void tipc_mon_get_state(struct net *net, u32 addr,
read_unlock_bh(&mon->lock);
}
-static void mon_timeout(unsigned long m)
+static void mon_timeout(struct timer_list *t)
{
- struct tipc_monitor *mon = (void *)m;
+ struct tipc_monitor *mon = timer_container_of(mon, t, timer);
struct tipc_peer *self;
int best_member_cnt = dom_size(mon->peer_cnt) - 1;
@@ -623,7 +673,7 @@ int tipc_mon_create(struct net *net, int bearer_id)
self->is_up = true;
self->is_head = true;
INIT_LIST_HEAD(&self->list);
- setup_timer(&mon->timer, mon_timeout, (unsigned long)mon);
+ timer_setup(&mon->timer, mon_timeout, 0);
mon->timer_intv = msecs_to_jiffies(MON_TIMEOUT + (tn->random & 0xffff));
mod_timer(&mon->timer, jiffies + mon->timer_intv);
return 0;
@@ -633,9 +683,13 @@ void tipc_mon_delete(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
struct tipc_monitor *mon = tipc_monitor(net, bearer_id);
- struct tipc_peer *self = get_self(net, bearer_id);
+ struct tipc_peer *self;
struct tipc_peer *peer, *tmp;
+ if (!mon)
+ return;
+
+ self = get_self(net, bearer_id);
write_lock_bh(&mon->lock);
tn->monitors[bearer_id] = NULL;
list_for_each_entry_safe(peer, tmp, &self->list, list) {
@@ -646,12 +700,28 @@ void tipc_mon_delete(struct net *net, int bearer_id)
}
mon->self = NULL;
write_unlock_bh(&mon->lock);
- del_timer_sync(&mon->timer);
+ timer_shutdown_sync(&mon->timer);
kfree(self->domain);
kfree(self);
kfree(mon);
}
+void tipc_mon_reinit_self(struct net *net)
+{
+ struct tipc_monitor *mon;
+ int bearer_id;
+
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ mon = tipc_monitor(net, bearer_id);
+ if (!mon)
+ continue;
+ write_lock_bh(&mon->lock);
+ if (mon->self)
+ mon->self->addr = tipc_own_addr(net);
+ write_unlock_bh(&mon->lock);
+ }
+}
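For context, a sketch of the kind of call site this helper targets (assumption: it mirrors the address-finalization path where the node address has just been generated; the wrapper function and call order here are illustrative):

    static void example_net_finalize(struct net *net, u32 addr)
    {
    	tipc_set_node_addr(net, addr);	/* addr just generated */
    	tipc_named_reinit(net);
    	tipc_sk_reinit(net);
    	/* re-stamp 'self' in every per-bearer monitor */
    	tipc_mon_reinit_self(net);
    }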
+
int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
{
struct tipc_net *tn = tipc_net(net);
@@ -671,7 +741,8 @@ int tipc_nl_monitor_get_threshold(struct net *net)
return tn->mon_threshold;
}
-int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg)
+static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
+ struct tipc_nl_msg *msg)
{
struct tipc_mon_domain *dom = peer->domain;
struct nlattr *attrs;
@@ -682,7 +753,7 @@ int __tipc_nl_add_monitor_peer(struct tipc_peer *peer, struct tipc_nl_msg *msg)
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_MON_PEER);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON_PEER);
if (!attrs)
goto msg_full;
@@ -764,14 +835,14 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
ret = tipc_bearer_get_name(net, bearer_name, bearer_id);
if (ret || !mon)
- return -EINVAL;
+ return 0;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_MON_GET);
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
if (!attrs)
goto msg_full;
diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
index 2a21b93e0d04..ed63d2e650b0 100644
--- a/net/tipc/monitor.h
+++ b/net/tipc/monitor.h
@@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id);
int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
u32 bearer_id, u32 *prev_node);
+void tipc_mon_reinit_self(struct net *net);
extern const int tipc_max_domain_size;
#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ab3087687a32..76284fc538eb 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -39,31 +39,37 @@
#include "msg.h"
#include "addr.h"
#include "name_table.h"
+#include "crypto.h"
+#define BUF_ALIGN(x) ALIGN(x, 4)
#define MAX_FORWARD_SIZE 1024
+#ifdef CONFIG_TIPC_CRYPTO
+#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
+#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
+#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
-#define BUF_TAILROOM 16
+#define BUF_OVERHEAD BUF_HEADROOM
+#endif
-static unsigned int align(unsigned int i)
-{
- return (i + 3) & ~3u;
-}
+const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
/**
* tipc_buf_acquire - creates a TIPC message buffer
* @size: message size (including TIPC header)
+ * @gfp: memory allocation flags
*
- * Returns a new buffer with data pointers set to the specified size.
+ * Return: a new buffer with data pointers set to the specified size.
*
- * NOTE: Headroom is reserved to allow prepending of a data link header.
- * There may also be unrequested tailroom present at the buffer's end.
+ * NOTE:
+ * Headroom is reserved to allow prepending of a data link header.
+ * There may also be unrequested tailroom present at the buffer's end.
*/
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
- unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
- skb = alloc_skb_fclone(buf_size, gfp);
+ skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@@ -105,10 +111,6 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
msg_set_origport(msg, oport);
msg_set_destport(msg, dport);
msg_set_errcode(msg, errcode);
- if (hdr_sz > SHORT_H_SIZE) {
- msg_set_orignode(msg, onode);
- msg_set_destnode(msg, dnode);
- }
return buf;
}
@@ -140,24 +142,25 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+ if (skb_has_frag_list(frag) && __skb_linearize(frag))
goto err;
- head = *headbuf = frag;
*buf = NULL;
+ frag = skb_unshare(frag, GFP_ATOMIC);
+ if (unlikely(!frag))
+ goto err;
+ head = *headbuf = frag;
TIPC_SKB_CB(head)->tail = NULL;
- if (skb_is_nonlinear(head)) {
- skb_walk_frags(head, tail) {
- TIPC_SKB_CB(head)->tail = tail;
- }
- } else {
- skb_frag_list_init(head);
- }
return 0;
}
if (!head)
goto err;
+ /* Either the input skb ownership is transferred to headskb
+ * or the input skb is freed, clear the reference to avoid
+ * bad access on error path.
+ */
+ *buf = NULL;
if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
kfree_skb_partial(frag, headstolen);
} else {
@@ -173,15 +176,14 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
}
if (fragid == LAST_FRAGMENT) {
- TIPC_SKB_CB(head)->validated = false;
- if (unlikely(!tipc_msg_validate(head)))
+ TIPC_SKB_CB(head)->validated = 0;
+ if (unlikely(!tipc_msg_validate(&head)))
goto err;
*buf = head;
TIPC_SKB_CB(head)->tail = NULL;
*headbuf = NULL;
return 1;
}
- *buf = NULL;
return 0;
err:
kfree_skb(*buf);
@@ -190,6 +192,56 @@ err:
return 0;
}
+/**
+ * tipc_msg_append(): Append data to tail of an existing buffer queue
+ * @_hdr: header to be used
+ * @m: the data to be appended
+ * @dlen: size of data to be appended
+ * @mss: max allowable size of a buffer
+ * @txq: queue to append to
+ *
+ * Return: the number of 1k blocks appended or errno value
+ */
+int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq)
+{
+ struct sk_buff *skb;
+ int accounted, total, curr;
+ int mlen, cpy, rem = dlen;
+ struct tipc_msg *hdr;
+
+ skb = skb_peek_tail(txq);
+ accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
+ total = accounted;
+
+ do {
+ if (!skb || skb->len >= mss) {
+ skb = tipc_buf_acquire(mss, GFP_KERNEL);
+ if (unlikely(!skb))
+ return -ENOMEM;
+ skb_orphan(skb);
+ skb_trim(skb, MIN_H_SIZE);
+ hdr = buf_msg(skb);
+ skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
+ msg_set_hdr_sz(hdr, MIN_H_SIZE);
+ msg_set_size(hdr, MIN_H_SIZE);
+ __skb_queue_tail(txq, skb);
+ total += 1;
+ }
+ hdr = buf_msg(skb);
+ curr = msg_blocks(hdr);
+ mlen = msg_size(hdr);
+ cpy = min_t(size_t, rem, mss - mlen);
+ if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
+ return -EFAULT;
+ msg_set_size(hdr, mlen + cpy);
+ skb_put(skb, cpy);
+ rem -= cpy;
+ total += msg_blocks(hdr) - curr;
+ } while (rem > 0);
+ return total - accounted;
+}
+
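A worked example of the block accounting (assumptions: an initially empty transmit queue, MIN_H_SIZE == 24, and msg_blocks() == msg_size()/1024 + 1 as defined in msg.h):

    /* dlen = 2000, mss = 1400, txq empty:
     *   skb #1: total = 1 (new buffer), copy 1400 - 24 = 1376 bytes
     *           -> msg_size 1400, 2 blocks, total = 2
     *   skb #2: total = 3 (new buffer), copy the remaining 624 bytes
     *           -> msg_size 648, still 1 block, total = 3
     * Return value: 3 - 0 accounted = 3 blocks appended.
     */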
/* tipc_msg_validate - validate basic format of received message
*
* This routine ensures a TIPC message has an acceptable header, and at least
@@ -201,13 +253,24 @@ err:
* TIPC will ignore the excess, under the assumption that it is optional info
* introduced by a later release of the protocol.
*/
-bool tipc_msg_validate(struct sk_buff *skb)
+bool tipc_msg_validate(struct sk_buff **_skb)
{
- struct tipc_msg *msg;
+ struct sk_buff *skb = *_skb;
+ struct tipc_msg *hdr;
int msz, hsz;
+ /* Ensure that flow control ratio condition is satisfied */
+ if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
+ skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
+ if (!skb)
+ return false;
+ kfree_skb(*_skb);
+ *_skb = skb;
+ }
+
if (unlikely(TIPC_SKB_CB(skb)->validated))
return true;
+
if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
return false;
@@ -217,11 +280,11 @@ bool tipc_msg_validate(struct sk_buff *skb)
if (unlikely(!pskb_may_pull(skb, hsz)))
return false;
- msg = buf_msg(skb);
- if (unlikely(msg_version(msg) != TIPC_VERSION))
+ hdr = buf_msg(skb);
+ if (unlikely(msg_version(hdr) != TIPC_VERSION))
return false;
- msz = msg_size(msg);
+ msz = msg_size(hdr);
if (unlikely(msz < hsz))
return false;
if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
@@ -229,32 +292,95 @@ bool tipc_msg_validate(struct sk_buff *skb)
if (unlikely(skb->len < msz))
return false;
- TIPC_SKB_CB(skb)->validated = true;
+ TIPC_SKB_CB(skb)->validated = 1;
return true;
}
/**
+ * tipc_msg_fragment - build a fragment skb list for TIPC message
+ *
+ * @skb: TIPC message skb
+ * @hdr: internal msg header to be put on the top of the fragments
+ * @pktmax: max size of a fragment incl. the header
+ * @frags: returned fragment skb list
+ *
+ * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
+ * or -ENOMEM
+ */
+int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
+ int pktmax, struct sk_buff_head *frags)
+{
+ int pktno, nof_fragms, dsz, dmax, eat;
+ struct tipc_msg *_hdr;
+ struct sk_buff *_skb;
+ u8 *data;
+
+ /* Non-linear buffer? */
+ if (skb_linearize(skb))
+ return -ENOMEM;
+
+ data = (u8 *)skb->data;
+ dsz = msg_size(buf_msg(skb));
+ dmax = pktmax - INT_H_SIZE;
+ if (dsz <= dmax || !dmax)
+ return -EINVAL;
+
+ nof_fragms = dsz / dmax + 1;
+ for (pktno = 1; pktno <= nof_fragms; pktno++) {
+ if (pktno < nof_fragms)
+ eat = dmax;
+ else
+ eat = dsz % dmax;
+ /* Allocate a new fragment */
+ _skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
+ if (!_skb)
+ goto error;
+ skb_orphan(_skb);
+ __skb_queue_tail(frags, _skb);
+ /* Copy header & data to the fragment */
+ skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
+ skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
+ data += eat;
+ /* Update the fragment's header */
+ _hdr = buf_msg(_skb);
+ msg_set_fragm_no(_hdr, pktno);
+ msg_set_nof_fragms(_hdr, nof_fragms);
+ msg_set_size(_hdr, INT_H_SIZE + eat);
+ }
+ return 0;
+
+error:
+ __skb_queue_purge(frags);
+ __skb_queue_head_init(frags);
+ return -ENOMEM;
+}
+
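A worked sizing example for the fragmenter (assumptions: INT_H_SIZE == 40 and illustrative message sizes):

    /* dsz = 3000, pktmax = 1460 -> dmax = 1460 - 40 = 1420
     * nof_fragms = 3000 / 1420 + 1 = 3
     * fragment payloads: 1420, 1420, 3000 % 1420 = 160
     * each fragment also carries INT_H_SIZE (40) bytes of header
     */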
+/**
* tipc_msg_build - create buffer chain containing specified header and data
* @mhdr: Message header, to be prepended to data
* @m: User message
+ * @offset: buffer offset for fragmented messages (FIXME)
* @dsz: Total length of user data
* @pktmax: Max packet size that can be used
* @list: Buffer or chain of buffers to be returned to caller
*
- * Returns message data size or errno: -ENOMEM, -EFAULT
+ * Note that the recursive call we are making here is safe, since it can
+ * logically go only one further level down.
+ *
+ * Return: message data size or errno: -ENOMEM, -EFAULT
*/
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
- int offset, int dsz, int pktmax, struct sk_buff_head *list)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+ int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
+ struct tipc_msg pkthdr;
int msz = mhsz + dsz;
- int pktno = 1;
- int pktsz;
int pktrem = pktmax;
- int drem = dsz;
- struct tipc_msg pkthdr;
struct sk_buff *skb;
+ int drem = dsz;
+ int pktno = 1;
char *pktpos;
+ int pktsz;
int rc;
msg_set_size(mhdr, msz);
@@ -262,8 +388,19 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
skb = tipc_buf_acquire(msz, GFP_KERNEL);
- if (unlikely(!skb))
+
+ /* Fall back to smaller MTU if node local message */
+ if (unlikely(!skb)) {
+ if (pktmax != MAX_MSG_SIZE)
+ return -ENOMEM;
+ rc = tipc_msg_build(mhdr, m, offset, dsz,
+ one_page_mtu, list);
+ if (rc != dsz)
+ return rc;
+ if (tipc_msg_assemble(list))
+ return dsz;
return -ENOMEM;
+ }
skb_orphan(skb);
__skb_queue_tail(list, skb);
skb_copy_to_linear_data(skb, mhdr, mhsz);
@@ -337,48 +474,98 @@ error:
}
/**
- * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
- * @skb: the buffer to append to ("bundle")
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer
- * Consumes buffer if successful
- * Returns true if bundling could be performed, otherwise false
+ * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
+ * @bskb: the bundle buffer to append to
+ * @msg: message to be appended
+ * @max: max allowable size for the bundle buffer
+ *
+ * Return: "true" if bundling has been performed, otherwise "false"
*/
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
+static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
+ u32 max)
{
- struct tipc_msg *bmsg;
- unsigned int bsz;
- unsigned int msz = msg_size(msg);
- u32 start, pad;
- u32 max = mtu - INT_H_SIZE;
+ struct tipc_msg *bmsg = buf_msg(bskb);
+ u32 msz, bsz, offset, pad;
- if (likely(msg_user(msg) == MSG_FRAGMENTER))
- return false;
- if (!skb)
- return false;
- bmsg = buf_msg(skb);
+ msz = msg_size(msg);
bsz = msg_size(bmsg);
- start = align(bsz);
- pad = start - bsz;
+ offset = BUF_ALIGN(bsz);
+ pad = offset - bsz;
- if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
+ if (unlikely(skb_tailroom(bskb) < (pad + msz)))
return false;
- if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+ if (unlikely(max < (offset + msz)))
return false;
- if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
+
+ skb_put(bskb, pad + msz);
+ skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
+ msg_set_size(bmsg, offset + msz);
+ msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ return true;
+}
+
+/**
+ * tipc_msg_try_bundle - Try to bundle a new message to the last one
+ * @tskb: the last/target message to which the new one will be appended
+ * @skb: the new message skb pointer
+ * @mss: max message size (header inclusive)
+ * @dnode: destination node for the message
+ * @new_bundle: if this call made a new bundle or not
+ *
+ * Return: "true" if the new message skb is a candidate for bundling, now or
+ * later; when bundling has been performed this time, the skb is consumed and
+ * the skb pointer is set to NULL.
+ * Otherwise, "false" if the skb cannot be bundled at all.
+ */
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle)
+{
+ struct tipc_msg *msg, *inner, *outer;
+ u32 tsz;
+
+ /* First, check if the new buffer is suitable for bundling */
+ msg = buf_msg(*skb);
+ if (msg_user(msg) == MSG_FRAGMENTER)
return false;
- if (unlikely(skb_tailroom(skb) < (pad + msz)))
+ if (msg_user(msg) == TUNNEL_PROTOCOL)
return false;
- if (unlikely(max < (start + msz)))
+ if (msg_user(msg) == BCAST_PROTOCOL)
return false;
- if ((msg_importance(msg) < TIPC_SYSTEM_IMPORTANCE) &&
- (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
+ if (mss <= INT_H_SIZE + msg_size(msg))
return false;
- skb_put(skb, pad + msz);
- skb_copy_to_linear_data_offset(skb, start, msg, msz);
- msg_set_size(bmsg, start + msz);
- msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+ /* The new message is bundlable, but is there a target buffer yet? */
+ if (unlikely(!tskb))
+ return true;
+
+ /* Is it a bundle already? Try to bundle the new message to it */
+ if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
+ *new_bundle = false;
+ goto bundle;
+ }
+
+ /* Make a new bundle of the two messages if possible */
+ tsz = msg_size(buf_msg(tskb));
+ if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))
+ return true;
+ if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
+ GFP_ATOMIC)))
+ return true;
+ inner = buf_msg(tskb);
+ skb_push(tskb, INT_H_SIZE);
+ outer = buf_msg(tskb);
+ tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
+ dnode);
+ msg_set_importance(outer, msg_importance(inner));
+ msg_set_size(outer, INT_H_SIZE + tsz);
+ msg_set_msgcnt(outer, 1);
+ *new_bundle = true;
+
+bundle:
+ if (likely(tipc_msg_bundle(tskb, msg, mss))) {
+ consume_skb(*skb);
+ *skb = NULL;
+ }
return true;
}
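A caller sketch for the new API (hypothetical link-layer transmit path; everything except tipc_msg_try_bundle itself is illustrative):

    static void example_xmit(struct sk_buff_head *txq, struct sk_buff *skb,
    			     u32 mss, u32 dnode)
    {
    	bool new_bundle;

    	if (!tipc_msg_try_bundle(skb_peek_tail(txq), &skb, mss, dnode,
    				 &new_bundle)) {
    		/* not bundlable at all: queue as a normal message */
    		__skb_queue_tail(txq, skb);
    		return;
    	}
    	if (skb)	/* candidate kept for later bundling */
    		__skb_queue_tail(txq, skb);
    	/* else: consumed into the tail bundle */
    }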
@@ -387,33 +574,38 @@ bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
* @skb: buffer to be extracted from.
* @iskb: extracted inner buffer, to be returned
* @pos: position in outer message of msg to be extracted.
- * Returns position of next msg
+ * Updates *pos to the position of the next inner message.
* Consumes outer buffer when last packet extracted
- * Returns true when when there is an extracted buffer, otherwise false
+ * Return: true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
- struct tipc_msg *msg;
- int imsz, offset;
+ struct tipc_msg *hdr, *ihdr;
+ int imsz;
*iskb = NULL;
if (unlikely(skb_linearize(skb)))
goto none;
- msg = buf_msg(skb);
- offset = msg_hdr_sz(msg) + *pos;
- if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
+ hdr = buf_msg(skb);
+ if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
goto none;
- *iskb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!*iskb))
+ ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
+ imsz = msg_size(ihdr);
+
+ if ((*pos + imsz) > msg_data_sz(hdr))
goto none;
- skb_pull(*iskb, offset);
- imsz = msg_size(buf_msg(*iskb));
- skb_trim(*iskb, imsz);
- if (unlikely(!tipc_msg_validate(*iskb)))
+
+ *iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
+ if (!*iskb)
+ goto none;
+
+ skb_copy_to_linear_data(*iskb, ihdr, imsz);
+ if (unlikely(!tipc_msg_validate(iskb)))
goto none;
- *pos += align(imsz);
+
+ *pos += BUF_ALIGN(imsz);
return true;
none:
kfree_skb(skb);
@@ -423,104 +615,58 @@ none:
}
/**
- * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
- * @list: the buffer chain, where head is the buffer to replace/append
- * @skb: buffer to be created, appended to and returned in case of success
- * @msg: message to be appended
- * @mtu: max allowable size for the bundle buffer, inclusive header
- * @dnode: destination node for message. (Not always present in header)
- * Returns true if success, otherwise false
- */
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode)
-{
- struct sk_buff *_skb;
- struct tipc_msg *bmsg;
- u32 msz = msg_size(msg);
- u32 max = mtu - INT_H_SIZE;
-
- if (msg_user(msg) == MSG_FRAGMENTER)
- return false;
- if (msg_user(msg) == TUNNEL_PROTOCOL)
- return false;
- if (msg_user(msg) == BCAST_PROTOCOL)
- return false;
- if (msz > (max / 2))
- return false;
-
- _skb = tipc_buf_acquire(max, GFP_ATOMIC);
- if (!_skb)
- return false;
-
- skb_trim(_skb, INT_H_SIZE);
- bmsg = buf_msg(_skb);
- tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
- INT_H_SIZE, dnode);
- if (msg_isdata(msg))
- msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
- else
- msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
- msg_set_seqno(bmsg, msg_seqno(msg));
- msg_set_ack(bmsg, msg_ack(msg));
- msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
- tipc_msg_bundle(_skb, msg, mtu);
- *skb = _skb;
- return true;
-}
-
-/**
* tipc_msg_reverse(): swap source and destination addresses and add error code
* @own_node: originating node id for reversed message
- * @skb: buffer containing message to be reversed; may be replaced.
+ * @skb: buffer containing message to be reversed; will be consumed
* @err: error code to be set in message, if any
- * Consumes buffer at failure
- * Returns true if success, otherwise false
+ * Replaces consumed buffer with new one when successful
+ * Return: true if success, otherwise false
*/
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
struct sk_buff *_skb = *skb;
- struct tipc_msg *hdr = buf_msg(_skb);
- struct tipc_msg ohdr;
- int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
+ struct tipc_msg *_hdr, *hdr;
+ int hlen, dlen;
if (skb_linearize(_skb))
goto exit;
- hdr = buf_msg(_skb);
- if (msg_dest_droppable(hdr))
+ _hdr = buf_msg(_skb);
+ dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
+ hlen = msg_hdr_sz(_hdr);
+
+ if (msg_dest_droppable(_hdr))
goto exit;
- if (msg_errcode(hdr))
+ if (msg_errcode(_hdr))
goto exit;
- /* Take a copy of original header before altering message */
- memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
-
- /* Never return SHORT header; expand by replacing buffer if necessary */
- if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
- if (!*skb)
- goto exit;
- memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
- kfree_skb(_skb);
- _skb = *skb;
- hdr = buf_msg(_skb);
- memcpy(hdr, &ohdr, BASIC_H_SIZE);
- msg_set_hdr_sz(hdr, BASIC_H_SIZE);
- }
+ /* Never return SHORT header */
+ if (hlen == SHORT_H_SIZE)
+ hlen = BASIC_H_SIZE;
+
+ /* Don't return data along with SYN+; the sender has a clone */
+ if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
+ dlen = 0;
- if (skb_cloned(_skb) &&
- pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
+ /* Allocate new buffer to return */
+ *skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
+ if (!*skb)
goto exit;
+ memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
+ memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);
- /* Now reverse the concerned fields */
+ /* Build reverse header in new buffer */
+ hdr = buf_msg(*skb);
+ msg_set_hdr_sz(hdr, hlen);
msg_set_errcode(hdr, err);
- msg_set_origport(hdr, msg_destport(&ohdr));
- msg_set_destport(hdr, msg_origport(&ohdr));
- msg_set_destnode(hdr, msg_prevnode(&ohdr));
+ msg_set_non_seq(hdr, 0);
+ msg_set_origport(hdr, msg_destport(_hdr));
+ msg_set_destport(hdr, msg_origport(_hdr));
+ msg_set_destnode(hdr, msg_prevnode(_hdr));
msg_set_prevnode(hdr, own_node);
msg_set_orignode(hdr, own_node);
- msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
- skb_trim(_skb, msg_size(hdr));
+ msg_set_size(hdr, hlen + dlen);
skb_orphan(_skb);
+ kfree_skb(_skb);
return true;
exit:
kfree_skb(_skb);
@@ -528,18 +674,38 @@ exit:
return false;
}
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = skb_clone(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ pr_err_ratelimited("Failed to clone buffer chain\n");
+ return false;
+ }
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
/**
* tipc_msg_lookup_dest(): try to find new destination for named message
+ * @net: pointer to associated network namespace
* @skb: the buffer containing the message.
* @err: error code to be used by caller if lookup fails
* Does not consume buffer
- * Returns true if a destination is found, false otherwise
+ * Return: true if a destination is found, false otherwise
*/
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
struct tipc_msg *msg = buf_msg(skb);
- u32 dport, dnode;
- u32 onode = tipc_own_addr(net);
+ u32 scope = msg_lookup_scope(msg);
+ u32 self = tipc_own_addr(net);
+ u32 inst = msg_nameinst(msg);
+ struct tipc_socket_addr sk;
+ struct tipc_uaddr ua;
if (!msg_isdata(msg))
return false;
@@ -547,26 +713,51 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
return false;
if (msg_errcode(msg))
return false;
- *err = -TIPC_ERR_NO_NAME;
+ *err = TIPC_ERR_NO_NAME;
if (skb_linearize(skb))
return false;
msg = buf_msg(skb);
if (msg_reroute_cnt(msg))
return false;
- dnode = addr_domain(net, msg_lookup_scope(msg));
- dport = tipc_nametbl_translate(net, msg_nametype(msg),
- msg_nameinst(msg), &dnode);
- if (!dport)
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
+ msg_nametype(msg), inst, inst);
+ sk.node = tipc_scope2node(net, scope);
+ if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
return false;
msg_incr_reroute_cnt(msg);
- if (dnode != onode)
- msg_set_prevnode(msg, onode);
- msg_set_destnode(msg, dnode);
- msg_set_destport(msg, dport);
+ if (sk.node != self)
+ msg_set_prevnode(msg, self);
+ msg_set_destnode(msg, sk.node);
+ msg_set_destport(msg, sk.ref);
*err = TIPC_OK;
+
return true;
}
+/* tipc_msg_assemble() - assemble chain of fragments into one message
+ */
+bool tipc_msg_assemble(struct sk_buff_head *list)
+{
+ struct sk_buff *skb, *tmp = NULL;
+
+ if (skb_queue_len(list) == 1)
+ return true;
+
+ while ((skb = __skb_dequeue(list))) {
+ skb->next = NULL;
+ if (tipc_buf_append(&tmp, &skb)) {
+ __skb_queue_tail(list, skb);
+ return true;
+ }
+ if (!tmp)
+ break;
+ }
+ __skb_queue_purge(list);
+ __skb_queue_head_init(list);
+ pr_warn("Failed do assemble buffer\n");
+ return false;
+}
+
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
* reassemble the clones into one message
*/
@@ -629,19 +820,19 @@ bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
* @seqno: sequence number of buffer to add
* @skb: buffer to add
*/
-void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb)
{
struct sk_buff *_skb, *tmp;
if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
__skb_queue_head(list, skb);
- return;
+ return true;
}
if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
__skb_queue_tail(list, skb);
- return;
+ return true;
}
skb_queue_walk_safe(list, _skb, tmp) {
@@ -650,7 +841,15 @@ void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
if (seqno == buf_seqno(_skb))
break;
__skb_queue_before(list, _skb, skb);
- return;
+ return true;
}
kfree_skb(skb);
+ return false;
+}
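With the new bool return, callers can tell an inserted buffer apart from a dropped duplicate; a hedged sketch (the stats field name is illustrative):

    static void example_defer(struct tipc_link *l, u16 seqno,
    			      struct sk_buff *skb)
    {
    	/* returns false, and frees skb, when seqno is a duplicate */
    	if (!__tipc_skb_queue_sorted(&l->deferdq, seqno, skb))
    		l->stats.duplicates++;	/* field name illustrative */
    }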
+
+void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
+{
+ if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
+ __skb_queue_tail(xmitq, skb);
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index c843fd2bc48d..c5eec16213d7 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,7 +1,7 @@
/*
* net/tipc/msg.h: Include file for TIPC message header routines
*
- * Copyright (c) 2000-2007, 2014-2015 Ericsson AB
+ * Copyright (c) 2000-2007, 2014-2017 Ericsson AB
* Copyright (c) 2005-2008, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -61,10 +61,14 @@ struct plist;
/*
* Payload message types
*/
-#define TIPC_CONN_MSG 0
-#define TIPC_MCAST_MSG 1
-#define TIPC_NAMED_MSG 2
-#define TIPC_DIRECT_MSG 3
+#define TIPC_CONN_MSG 0
+#define TIPC_MCAST_MSG 1
+#define TIPC_NAMED_MSG 2
+#define TIPC_DIRECT_MSG 3
+#define TIPC_GRP_MEMBER_EVT 4
+#define TIPC_GRP_BCAST_MSG 5
+#define TIPC_GRP_MCAST_MSG 6
+#define TIPC_GRP_UCAST_MSG 7
/*
* Internal message users
@@ -73,11 +77,14 @@ struct plist;
#define MSG_BUNDLER 6
#define LINK_PROTOCOL 7
#define CONN_MANAGER 8
+#define GROUP_PROTOCOL 9
#define TUNNEL_PROTOCOL 10
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
#define LINK_CONFIG 13
+#define MSG_CRYPTO 14
#define SOCK_WAKEUP 14 /* pseudo user */
+#define TOP_SRV 15 /* pseudo user */
/*
* Message header sizes
@@ -86,21 +93,55 @@ struct plist;
#define BASIC_H_SIZE 32 /* Basic payload message */
#define NAMED_H_SIZE 40 /* Named payload message */
#define MCAST_H_SIZE 44 /* Multicast payload message */
+#define GROUP_H_SIZE 44 /* Group payload message */
#define INT_H_SIZE 40 /* Internal messages */
#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */
#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
-
#define TIPC_MEDIA_INFO_OFFSET 5
+extern const int one_page_mtu;
+
struct tipc_skb_cb {
- u32 bytes_read;
- struct sk_buff *tail;
- bool validated;
- u16 chain_imp;
- u16 ackers;
-};
+ union {
+ struct {
+ struct sk_buff *tail;
+ unsigned long nxt_retr;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
+ u16 chain_imp;
+ u16 ackers;
+ u16 retr_cnt;
+ } __packed;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct {
+ struct tipc_crypto *rx;
+ struct tipc_aead *last;
+ u8 recurs;
+ } tx_clone_ctx __packed;
+#endif
+ } __packed;
+ union {
+ struct {
+ u8 validated:1;
+#ifdef CONFIG_TIPC_CRYPTO
+ u8 encrypted:1;
+ u8 decrypted:1;
+#define SKB_PROBING 1
+#define SKB_GRACING 2
+ u8 xmit_type:2;
+ u8 tx_clone_deferred:1;
+#endif
+ };
+ u8 flags;
+ };
+ u8 reserved;
+#ifdef CONFIG_TIPC_CRYPTO
+ void *crypto_ctx;
+#endif
+} __packed;
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
@@ -108,6 +149,54 @@ struct tipc_msg {
__be32 hdr[15];
};
+/* struct tipc_gap_ack - TIPC Gap ACK block
+ * @ack: seqno of the last consecutive packet in link deferdq
+ * @gap: number of gap packets since the last ack
+ *
+ * E.g:
+ * link deferdq: 1 2 3 4 10 11 13 14 15 20
+ * --> Gap ACK blocks: <4, 5>, <11, 1>, <15, 4>, <20, 0>
+ */
+struct tipc_gap_ack {
+ __be16 ack;
+ __be16 gap;
+};
+
+/* struct tipc_gap_ack_blks
+ * @len: actual length of the record
+ * @ugack_cnt: number of Gap ACK blocks for unicast (following the broadcast
+ * ones)
+ * @start_index: starting index for "valid" broadcast Gap ACK blocks
+ * @bgack_cnt: number of Gap ACK blocks for broadcast in the record
+ * @gacks: array of Gap ACK blocks
+ *
+ * 31 16 15 0
+ * +-------------+-------------+-------------+-------------+
+ * | bgack_cnt | ugack_cnt | len |
+ * +-------------+-------------+-------------+-------------+ -
+ * | gap | ack | |
+ * +-------------+-------------+-------------+-------------+ > bc gacks
+ * : : : |
+ * +-------------+-------------+-------------+-------------+ -
+ * | gap | ack | |
+ * +-------------+-------------+-------------+-------------+ > uc gacks
+ * : : : |
+ * +-------------+-------------+-------------+-------------+ -
+ */
+struct tipc_gap_ack_blks {
+ __be16 len;
+ union {
+ u8 ugack_cnt;
+ u8 start_index;
+ };
+ u8 bgack_cnt;
+ struct tipc_gap_ack gacks[];
+};
+
+#define MAX_GAP_ACK_BLKS 128
+#define MAX_GAP_ACK_BLKS_SZ (sizeof(struct tipc_gap_ack_blks) + \
+ sizeof(struct tipc_gap_ack) * MAX_GAP_ACK_BLKS)
+
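To make the layout concrete, a sketch that derives Gap ACK blocks from a sorted list of received seqnos, reproducing the deferdq example above (helper name and array input are illustrative):

    static u16 example_build_gacks(const u16 *seqno, u16 n,
    			           struct tipc_gap_ack *ga)
    {
    	u16 i, cnt = 0;

    	for (i = 0; i < n; i++) {
    		/* skip ahead while the run stays consecutive */
    		if (i + 1 < n && seqno[i + 1] == seqno[i] + 1)
    			continue;
    		ga[cnt].ack = htons(seqno[i]);
    		ga[cnt].gap = htons(i + 1 < n ?
    				    seqno[i + 1] - seqno[i] - 1 : 0);
    		cnt++;
    	}
    	/* {1 2 3 4 10 11 13 14 15 20} -> <4,5> <11,1> <15,4> <20,0> */
    	return cnt;
    }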
static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
{
return (struct tipc_msg *)skb->data;
@@ -137,14 +226,6 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
m->hdr[w] |= htonl(val);
}
-static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
-{
- u32 temp = msg->hdr[a];
-
- msg->hdr[a] = msg->hdr[b];
- msg->hdr[b] = temp;
-}
-
/*
* Word 0
*/
@@ -188,6 +269,11 @@ static inline u32 msg_size(struct tipc_msg *m)
return msg_bits(m, 0, 0, 0x1ffff);
}
+static inline u32 msg_blocks(struct tipc_msg *m)
+{
+ return (msg_size(m) / 1024) + 1;
+}
+
static inline u32 msg_data_sz(struct tipc_msg *m)
{
return msg_size(m) - msg_hdr_sz(m);
@@ -203,6 +289,16 @@ static inline void msg_set_non_seq(struct tipc_msg *m, u32 n)
msg_set_bits(m, 0, 20, 1, n);
}
+static inline int msg_is_syn(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 17, 1);
+}
+
+static inline void msg_set_syn(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 17, 1, d);
+}
+
static inline int msg_dest_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 19, 1);
@@ -213,6 +309,16 @@ static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
msg_set_bits(m, 0, 19, 1, d);
}
+static inline int msg_is_keepalive(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_is_keepalive(struct tipc_msg *m, u32 d)
+{
+ msg_set_bits(m, 0, 19, 1, d);
+}
+
static inline int msg_src_droppable(struct tipc_msg *m)
{
return msg_bits(m, 0, 18, 1);
@@ -223,6 +329,36 @@ static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
msg_set_bits(m, 0, 18, 1, d);
}
+static inline int msg_ack_required(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_ack_required(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 18, 1, 1);
+}
+
+static inline int msg_nagle_ack(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_nagle_ack(struct tipc_msg *m)
+{
+ msg_set_bits(m, 0, 18, 1, 1);
+}
+
+static inline bool msg_is_rcast(struct tipc_msg *m)
+{
+ return msg_bits(m, 0, 18, 0x1);
+}
+
+static inline void msg_set_is_rcast(struct tipc_msg *m, bool d)
+{
+ msg_set_bits(m, 0, 18, 0x1, d);
+}
+
static inline void msg_set_size(struct tipc_msg *m, u32 sz)
{
m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
@@ -233,7 +369,7 @@ static inline unchar *msg_data(struct tipc_msg *m)
return ((unchar *)m) + msg_hdr_sz(m);
}
-static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+static inline struct tipc_msg *msg_inner_hdr(struct tipc_msg *m)
{
return (struct tipc_msg *)msg_data(m);
}
@@ -251,6 +387,18 @@ static inline void msg_set_type(struct tipc_msg *m, u32 n)
msg_set_bits(m, 1, 29, 0x7, n);
}
+static inline int msg_in_group(struct tipc_msg *m)
+{
+ int mtyp = msg_type(m);
+
+ return mtyp >= TIPC_GRP_MEMBER_EVT && mtyp <= TIPC_GRP_UCAST_MSG;
+}
+
+static inline bool msg_is_grp_evt(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_GRP_MEMBER_EVT;
+}
+
static inline u32 msg_named(struct tipc_msg *m)
{
return msg_type(m) == TIPC_NAMED_MSG;
@@ -258,7 +406,10 @@ static inline u32 msg_named(struct tipc_msg *m)
static inline u32 msg_mcast(struct tipc_msg *m)
{
- return msg_type(m) == TIPC_MCAST_MSG;
+ int mtyp = msg_type(m);
+
+ return ((mtyp == TIPC_MCAST_MSG) || (mtyp == TIPC_GRP_BCAST_MSG) ||
+ (mtyp == TIPC_GRP_MCAST_MSG));
}
static inline u32 msg_connected(struct tipc_msg *m)
@@ -266,6 +417,11 @@ static inline u32 msg_connected(struct tipc_msg *m)
return msg_type(m) == TIPC_CONN_MSG;
}
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+ return msg_type(m) == TIPC_DIRECT_MSG;
+}
+
static inline u32 msg_errcode(struct tipc_msg *m)
{
return msg_bits(m, 1, 25, 0xf);
@@ -276,6 +432,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
msg_set_bits(m, 1, 25, 0xf, err);
}
+static inline void msg_set_bulk(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 28, 0x1, 1);
+}
+
+static inline u32 msg_is_bulk(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 28, 0x1);
+}
+
+static inline void msg_set_last_bulk(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 27, 0x1, 1);
+}
+
+static inline u32 msg_is_last_bulk(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 27, 0x1);
+}
+
+static inline void msg_set_non_legacy(struct tipc_msg *m)
+{
+ msg_set_bits(m, 1, 26, 0x1, 1);
+}
+
+static inline u32 msg_is_legacy(struct tipc_msg *m)
+{
+ return !msg_bits(m, 1, 26, 0x1);
+}
+
static inline u32 msg_reroute_cnt(struct tipc_msg *m)
{
return msg_bits(m, 1, 21, 0xf);
@@ -286,11 +472,6 @@ static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
}
-static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
-{
- msg_set_bits(m, 1, 21, 0xf, 0);
-}
-
static inline u32 msg_lookup_scope(struct tipc_msg *m)
{
return msg_bits(m, 1, 19, 0x3);
@@ -311,6 +492,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n)
msg_set_bits(m, 1, 0, 0xffff, n);
}
+/* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch
+ * link peer session number
+ */
+static inline bool msg_dest_session_valid(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 16, 0x1);
+}
+
+static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid)
+{
+ msg_set_bits(m, 1, 16, 0x1, valid);
+}
+
+static inline u16 msg_dest_session(struct tipc_msg *m)
+{
+ return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_dest_session(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 1, 0, 0xffff, n);
+}
/*
* Word 2
@@ -374,7 +577,7 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
static inline u32 msg_origport(struct tipc_msg *m)
{
if (msg_user(m) == MSG_FRAGMENTER)
- m = msg_get_wrapped(m);
+ m = msg_inner_hdr(m);
return msg_word(m, 4);
}
@@ -383,6 +586,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p)
msg_set_word(m, 4, p);
}
+static inline u16 msg_named_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
static inline u32 msg_destport(struct tipc_msg *m)
{
return msg_word(m, 5);
@@ -512,6 +725,21 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
*/
#define DSC_REQ_MSG 0
#define DSC_RESP_MSG 1
+#define DSC_TRIAL_MSG 2
+#define DSC_TRIAL_FAIL_MSG 3
+
+/*
+ * Group protocol message types
+ */
+#define GRP_JOIN_MSG 0
+#define GRP_LEAVE_MSG 1
+#define GRP_ADV_MSG 2
+#define GRP_ACK_MSG 3
+#define GRP_RECLAIM_MSG 4
+#define GRP_REMIT_MSG 5
+
+/* Crypto message types */
+#define KEY_DISTR_MSG 0
/*
* Word 1
@@ -559,11 +787,6 @@ static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
msg_set_word(m, 2, n);
}
-static inline u32 msg_bcgap_after(struct tipc_msg *m)
-{
- return msg_bits(m, 2, 16, 0xffff);
-}
-
static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 2, 16, 0xffff, n);
@@ -579,7 +802,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
msg_set_bits(m, 2, 0, 0xffff, n);
}
-
/*
* Word 4
*/
@@ -598,12 +820,26 @@ static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
msg_set_bits(m, 4, 16, 0xffff, n);
}
+static inline u32 msg_nof_fragms(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_nof_fragms(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_fragm_no(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
{
msg_set_bits(m, 4, 16, 0xffff, n);
}
-
static inline u16 msg_next_sent(struct tipc_msg *m)
{
return msg_bits(m, 4, 0, 0xffff);
@@ -614,11 +850,6 @@ static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
msg_set_bits(m, 4, 0, 0xffff, n);
}
-static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
-{
- msg_set_bits(m, 4, 0, 0xffff, n);
-}
-
static inline u32 msg_bc_netid(struct tipc_msg *m)
{
return msg_word(m, 4);
@@ -754,6 +985,16 @@ static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
msg_set_bits(m, 9, 16, 0xffff, n);
}
+static inline u16 msg_syncpt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_syncpt(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
static inline u32 msg_conn_ack(struct tipc_msg *m)
{
return msg_bits(m, 9, 16, 0xffff);
@@ -764,12 +1005,12 @@ static inline void msg_set_conn_ack(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 16, 0xffff, n);
}
-static inline u32 msg_adv_win(struct tipc_msg *m)
+static inline u16 msg_adv_win(struct tipc_msg *m)
{
return msg_bits(m, 9, 0, 0xffff);
}
-static inline void msg_set_adv_win(struct tipc_msg *m, u32 n)
+static inline void msg_set_adv_win(struct tipc_msg *m, u16 n)
{
msg_set_bits(m, 9, 0, 0xffff, n);
}
@@ -794,6 +1035,68 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
msg_set_bits(m, 9, 0, 0xffff, n);
}
+static inline u16 msg_grp_bc_syncpt(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_syncpt(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_grp_bc_acked(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_acked(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u16 msg_grp_remitted(struct tipc_msg *m)
+{
+ return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_grp_remitted(struct tipc_msg *m, u16 n)
+{
+ msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+/* Word 10
+ */
+static inline u16 msg_grp_evt(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 0, 0x3);
+}
+
+static inline void msg_set_grp_evt(struct tipc_msg *m, int n)
+{
+ msg_set_bits(m, 10, 0, 0x3, n);
+}
+
+static inline u16 msg_grp_bc_ack_req(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 0, 0x1);
+}
+
+static inline void msg_set_grp_bc_ack_req(struct tipc_msg *m, bool n)
+{
+ msg_set_bits(m, 10, 0, 0x1, n);
+}
+
+static inline u16 msg_grp_bc_seqno(struct tipc_msg *m)
+{
+ return msg_bits(m, 10, 16, 0xffff);
+}
+
+static inline void msg_set_grp_bc_seqno(struct tipc_msg *m, u32 n)
+{
+ msg_set_bits(m, 10, 16, 0xffff, n);
+}
+
static inline bool msg_peer_link_is_up(struct tipc_msg *m)
{
if (likely(msg_user(m) != LINK_PROTOCOL))
@@ -815,33 +1118,79 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
+/* Word 13
+ */
+static inline void msg_set_peer_net_hash(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 13, n);
+}
+
+static inline u32 msg_peer_net_hash(struct tipc_msg *m)
+{
+ return msg_word(m, 13);
+}
+
+/* Word 14
+ */
+static inline u32 msg_sugg_node_addr(struct tipc_msg *m)
+{
+ return msg_word(m, 14);
+}
+
+static inline void msg_set_sugg_node_addr(struct tipc_msg *m, u32 n)
+{
+ msg_set_word(m, 14, n);
+}
+
+static inline void msg_set_node_id(struct tipc_msg *hdr, u8 *id)
+{
+ memcpy(msg_data(hdr), id, 16);
+}
+
+static inline u8 *msg_node_id(struct tipc_msg *hdr)
+{
+ return (u8 *)msg_data(hdr);
+}
+
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
-bool tipc_msg_validate(struct sk_buff *skb);
+bool tipc_msg_validate(struct sk_buff **_skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
+void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
+ struct sk_buff_head *xmitq);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 destnode);
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
uint data_sz, u32 dnode, u32 onode,
u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
-bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
-bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
- u32 mtu, u32 dnode);
+bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
+ u32 dnode, bool *new_bundle);
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
+int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
+ int pktmax, struct sk_buff_head *frags);
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
+int tipc_msg_append(struct tipc_msg *hdr, struct msghdr *m, int dlen,
+ int mss, struct sk_buff_head *txq);
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
+bool tipc_msg_assemble(struct sk_buff_head *list);
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
struct sk_buff_head *cpy);
-void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
+bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb);
+bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy);
static inline u16 buf_seqno(struct sk_buff *skb)
{
return msg_seqno(buf_msg(skb));
}
+static inline int buf_roundup_len(struct sk_buff *skb)
+{
+ return (skb->len / 1024 + 1) * 1024;
+}
+
/* tipc_skb_peek(): peek and reserve first buffer in list
* @list: list to be peeked in
* Returns pointer to first buffer in list, if any
@@ -937,4 +1286,25 @@ static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
tipc_skb_queue_splice_tail(&tmp, head);
}
+/* __tipc_skb_dequeue() - dequeue the head skb according to expected seqno
+ * @list: list to be dequeued from
+ * @seqno: seqno of the expected msg
+ *
+ * Return: the skb dequeued from the list if its seqno is less than or equal
+ * to the expected one, otherwise NULL while the skb stays on the list
+ *
+ * Note: must only be used with the appropriate locks held
+ */
+static inline struct sk_buff *__tipc_skb_dequeue(struct sk_buff_head *list,
+ u16 seqno)
+{
+ struct sk_buff *skb = skb_peek(list);
+
+ if (skb && less_eq(buf_seqno(skb), seqno)) {
+ __skb_unlink(skb, list);
+ return skb;
+ }
+ return NULL;
+}
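A typical drain loop built on this helper (sketch; queue and seqno names are illustrative):

    static void example_drain(struct sk_buff_head *defq, u16 rcv_nxt,
    			      struct sk_buff_head *inputq)
    {
    	struct sk_buff *skb;

    	/* release every deferred buffer that is now in sequence */
    	while ((skb = __tipc_skb_dequeue(defq, rcv_nxt)) != NULL) {
    		__skb_queue_tail(inputq, skb);
    		rcv_nxt++;
    	}
    }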
+
#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 23f8899e0f8c..190b49c5cbc3 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -1,8 +1,9 @@
/*
* net/tipc/name_distr.c: TIPC name distribution code
*
- * Copyright (c) 2000-2006, 2014, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2019, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,42 +41,40 @@
int sysctl_tipc_named_timeout __read_mostly = 2000;
-struct distr_queue_item {
- struct distr_item i;
- u32 dtype;
- u32 node;
- unsigned long expires;
- struct list_head next;
-};
-
/**
* publ_to_item - add publication info to a publication message
+ * @i: location of item in the message
+ * @p: publication info
*/
static void publ_to_item(struct distr_item *i, struct publication *p)
{
- i->type = htonl(p->type);
- i->lower = htonl(p->lower);
- i->upper = htonl(p->upper);
- i->ref = htonl(p->ref);
+ i->type = htonl(p->sr.type);
+ i->lower = htonl(p->sr.lower);
+ i->upper = htonl(p->sr.upper);
+ i->port = htonl(p->sk.ref);
i->key = htonl(p->key);
}
/**
* named_prepare_buf - allocate & initialize a publication message
+ * @net: the associated network namespace
+ * @type: message type
+ * @size: payload size
+ * @dest: destination node
*
* The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ u32 self = tipc_own_addr(net);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
- tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
- INT_H_SIZE, dest);
+ tipc_msg_init(self, msg, NAME_DISTRIBUTOR,
+ type, INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
@@ -83,71 +82,83 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
/**
* tipc_named_publish - tell other nodes about a new publication by this node
+ * @net: the associated network namespace
+ * @p: the new publication
*/
-struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
+struct sk_buff *tipc_named_publish(struct net *net, struct publication *p)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff *buf;
+ struct name_table *nt = tipc_name_table(net);
struct distr_item *item;
+ struct sk_buff *skb;
- list_add_tail_rcu(&publ->local_list,
- &tn->nametbl->publ_list[publ->scope]);
-
- if (publ->scope == TIPC_NODE_SCOPE)
+ if (p->scope == TIPC_NODE_SCOPE) {
+ list_add_tail_rcu(&p->binding_node, &nt->node_scope);
return NULL;
-
- buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
- if (!buf) {
+ }
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_add_tail(&p->binding_node, &nt->cluster_scope);
+ write_unlock_bh(&nt->cluster_scope_lock);
+ skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
+ if (!skb) {
pr_warn("Publication distribution failure\n");
return NULL;
}
-
- item = (struct distr_item *)msg_data(buf_msg(buf));
- publ_to_item(item, publ);
- return buf;
+ msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+ msg_set_non_legacy(buf_msg(skb));
+ item = (struct distr_item *)msg_data(buf_msg(skb));
+ publ_to_item(item, p);
+ return skb;
}
/**
* tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
+ * @net: the associated network namespace
+ * @p: the withdrawn publication
*/
-struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
+struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *p)
{
- struct sk_buff *buf;
+ struct name_table *nt = tipc_name_table(net);
struct distr_item *item;
+ struct sk_buff *skb;
- list_del(&publ->local_list);
-
- if (publ->scope == TIPC_NODE_SCOPE)
+ write_lock_bh(&nt->cluster_scope_lock);
+ list_del(&p->binding_node);
+ write_unlock_bh(&nt->cluster_scope_lock);
+ if (p->scope == TIPC_NODE_SCOPE)
return NULL;
- buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
- if (!buf) {
+ skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
+ if (!skb) {
pr_warn("Withdrawal distribution failure\n");
return NULL;
}
-
- item = (struct distr_item *)msg_data(buf_msg(buf));
- publ_to_item(item, publ);
- return buf;
+ msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+ msg_set_non_legacy(buf_msg(skb));
+ item = (struct distr_item *)msg_data(buf_msg(skb));
+ publ_to_item(item, p);
+ return skb;
}
/**
* named_distribute - prepare name info for bulk distribution to another node
+ * @net: the associated network namespace
* @list: list of messages (buffers) to be returned from this function
* @dnode: node to be updated
* @pls: linked list of publication items to be packed into buffer chain
+ * @seqno: sequence number for this message
*/
static void named_distribute(struct net *net, struct sk_buff_head *list,
- u32 dnode, struct list_head *pls)
+ u32 dnode, struct list_head *pls, u16 seqno)
{
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
+ struct tipc_msg *hdr;
- list_for_each_entry(publ, pls, local_list) {
+ list_for_each_entry(publ, pls, binding_node) {
/* Prepare next buffer: */
if (!skb) {
skb = named_prepare_buf(net, PUBLICATION, msg_rem,
@@ -156,8 +167,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
pr_warn("Bulk publication failure\n");
return;
}
- msg_set_bc_ack_invalid(buf_msg(skb), true);
- item = (struct distr_item *)msg_data(buf_msg(skb));
+ hdr = buf_msg(skb);
+ msg_set_bc_ack_invalid(hdr, true);
+ msg_set_bulk(hdr);
+ msg_set_non_legacy(hdr);
+ item = (struct distr_item *)msg_data(hdr);
}
/* Pack publication into message: */
@@ -173,208 +187,207 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
}
}
if (skb) {
- msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
+ hdr = buf_msg(skb);
+ msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
__skb_queue_tail(list, skb);
}
+ hdr = buf_msg(skb_peek_tail(list));
+ msg_set_last_bulk(hdr);
+ msg_set_named_seqno(hdr, seqno);
}
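
The msg_dsz computation above rounds the usable payload down to a whole
number of items, so no publication is ever split across two bulk buffers. A
worked example, assuming an MTU of 1500, the internal header size INT_H_SIZE
of 40, and ITEM_SIZE of 20 (five __be32 fields per distr_item):

    msg_dsz = ((1500 - 40) / 20) * 20 = 73 * 20 = 1460

so each bulk message carries at most 73 publication items, and the final
buffer is trimmed to INT_H_SIZE + (msg_dsz - msg_rem).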
/**
* tipc_named_node_up - tell specified node about all publications by this node
+ * @net: the associated network namespace
+ * @dnode: destination node
+ * @capabilities: peer node's capabilities
*/
-void tipc_named_node_up(struct net *net, u32 dnode)
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
struct sk_buff_head head;
+ u16 seqno;
__skb_queue_head_init(&head);
+ spin_lock_bh(&tn->nametbl_lock);
+ if (!(capabilities & TIPC_NAMED_BCAST))
+ nt->rc_dests++;
+ seqno = nt->snd_nxt;
+ spin_unlock_bh(&tn->nametbl_lock);
- rcu_read_lock();
- named_distribute(net, &head, dnode,
- &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
- named_distribute(net, &head, dnode,
- &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
- rcu_read_unlock();
-
+ read_lock_bh(&nt->cluster_scope_lock);
+ named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
tipc_node_xmit(net, &head, dnode, 0);
+ read_unlock_bh(&nt->cluster_scope_lock);
}
/**
* tipc_publ_purge - remove publication associated with a failed node
+ * @net: the associated network namespace
+ * @p: the publication to remove
+ * @addr: failed node's address
*
* Invoked for each publication issued by a newly failed node.
* Removes publication structure from name table & deletes it.
*/
-static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
+static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct publication *p;
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *_p;
+ struct tipc_uaddr ua;
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, p->scope, p->sr.type,
+ p->sr.lower, p->sr.upper);
spin_lock_bh(&tn->nametbl_lock);
- p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
- publ->node, publ->ref, publ->key);
- if (p)
- tipc_node_unsubscribe(net, &p->nodesub_list, addr);
+ _p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key);
+ if (_p)
+ tipc_node_unsubscribe(net, &_p->binding_node, addr);
spin_unlock_bh(&tn->nametbl_lock);
-
- if (p != publ) {
- pr_err("Unable to remove publication from failed node\n"
- " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node, publ->ref,
- publ->key);
- }
-
- kfree_rcu(p, rcu);
+ if (_p)
+ kfree_rcu(_p, rcu);
}
-/**
- * tipc_dist_queue_purge - remove deferred updates from a node that went down
- */
-static void tipc_dist_queue_purge(struct net *net, u32 addr)
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+ u32 addr, u16 capabilities)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct distr_queue_item *e, *tmp;
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
- spin_lock_bh(&tn->nametbl_lock);
- list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
- if (e->node != addr)
- continue;
- list_del(&e->next);
- kfree(e);
- }
- spin_unlock_bh(&tn->nametbl_lock);
-}
-
-void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
-{
struct publication *publ, *tmp;
- list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
+ list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
tipc_publ_purge(net, publ, addr);
- tipc_dist_queue_purge(net, addr);
+ spin_lock_bh(&tn->nametbl_lock);
+ if (!(capabilities & TIPC_NAMED_BCAST))
+ nt->rc_dests--;
+ spin_unlock_bh(&tn->nametbl_lock);
}
/**
* tipc_update_nametbl - try to process a nametable update and notify
* subscribers
+ * @net: the associated network namespace
+ * @i: location of item in the message
+ * @node: node address
+ * @dtype: name distributor message type
*
* tipc_nametbl_lock must be held.
- * Returns the publication item if successful, otherwise NULL.
+ * Return: true if the update was applied, otherwise false.
*/
static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
u32 node, u32 dtype)
{
- struct publication *publ = NULL;
+ struct publication *p = NULL;
+ struct tipc_socket_addr sk;
+ struct tipc_uaddr ua;
+ u32 key = ntohl(i->key);
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
+ ntohl(i->type), ntohl(i->lower), ntohl(i->upper));
+ sk.ref = ntohl(i->port);
+ sk.node = node;
if (dtype == PUBLICATION) {
- publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
- ntohl(i->lower),
- ntohl(i->upper),
- TIPC_CLUSTER_SCOPE, node,
- ntohl(i->ref), ntohl(i->key));
- if (publ) {
- tipc_node_subscribe(net, &publ->nodesub_list, node);
+ p = tipc_nametbl_insert_publ(net, &ua, &sk, key);
+ if (p) {
+ tipc_node_subscribe(net, &p->binding_node, node);
return true;
}
} else if (dtype == WITHDRAWAL) {
- publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
- ntohl(i->lower),
- node, ntohl(i->ref),
- ntohl(i->key));
- if (publ) {
- tipc_node_unsubscribe(net, &publ->nodesub_list, node);
- kfree_rcu(publ, rcu);
+ p = tipc_nametbl_remove_publ(net, &ua, &sk, key);
+ if (p) {
+ tipc_node_unsubscribe(net, &p->binding_node, node);
+ kfree_rcu(p, rcu);
return true;
}
+ pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
+ ua.sr.type, ua.sr.lower, node);
} else {
- pr_warn("Unrecognized name table message received\n");
+ pr_warn_ratelimited("Unknown name table message received\n");
}
return false;
}
-/**
- * tipc_named_add_backlog - add a failed name table update to the backlog
- *
- */
-static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
- u32 type, u32 node)
+static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open)
{
- struct distr_queue_item *e;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- unsigned long now = get_jiffies_64();
-
- e = kzalloc(sizeof(*e), GFP_ATOMIC);
- if (!e)
- return;
- e->dtype = type;
- e->node = node;
- e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
- memcpy(e, i, sizeof(*i));
- list_add_tail(&e->next, &tn->dist_queue);
-}
+ struct sk_buff *skb, *tmp;
+ struct tipc_msg *hdr;
+ u16 seqno;
+
+ spin_lock_bh(&namedq->lock);
+ skb_queue_walk_safe(namedq, skb, tmp) {
+ if (unlikely(skb_linearize(skb))) {
+ __skb_unlink(skb, namedq);
+ kfree_skb(skb);
+ continue;
+ }
+ hdr = buf_msg(skb);
+ seqno = msg_named_seqno(hdr);
+ if (msg_is_last_bulk(hdr)) {
+ *rcv_nxt = seqno;
+ *open = true;
+ }
-/**
- * tipc_named_process_backlog - try to process any pending name table updates
- * from the network.
- */
-void tipc_named_process_backlog(struct net *net)
-{
- struct distr_queue_item *e, *tmp;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- char addr[16];
- unsigned long now = get_jiffies_64();
-
- list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
- if (time_after(e->expires, now)) {
- if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
- continue;
- } else {
- tipc_addr_string_fill(addr, e->node);
- pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
- e->dtype, ntohl(e->i.type),
- ntohl(e->i.lower),
- ntohl(e->i.upper),
- addr, ntohl(e->i.key));
+ if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+ __skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (*open && (*rcv_nxt == seqno)) {
+ (*rcv_nxt)++;
+ __skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
+ return skb;
+ }
+
+ if (less(seqno, *rcv_nxt)) {
+ __skb_unlink(skb, namedq);
+ kfree_skb(skb);
+ continue;
}
- list_del(&e->next);
- kfree(e);
}
+ spin_unlock_bh(&namedq->lock);
+ return NULL;
}
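
tipc_named_dequeue() above effectively classifies each queued message: bulk
and legacy messages bypass sequencing, the in-sequence message is delivered
and advances rcv_nxt, stale ones are dropped, and future ones are held. A
compact userspace sketch of that decision, assuming plain flags in place of
the real header bits:

    #include <stdint.h>
    #include <stdio.h>

    enum action { DELIVER, DROP, HOLD };

    static enum action classify(int bulk_or_legacy, int open,
                                uint16_t rcv_nxt, uint16_t seqno)
    {
            if (bulk_or_legacy)
                    return DELIVER;          /* bypasses sequencing */
            if (open && rcv_nxt == seqno)
                    return DELIVER;          /* in sequence; bump rcv_nxt */
            if ((int16_t)(seqno - rcv_nxt) < 0)
                    return DROP;             /* stale duplicate */
            return HOLD;                     /* future msg stays queued */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   classify(0, 1, 10, 10),   /* 0: DELIVER */
                   classify(0, 1, 10, 9),    /* 1: DROP */
                   classify(0, 1, 10, 11));  /* 2: HOLD */
            return 0;
    }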
/**
* tipc_named_rcv - process name table update messages sent by another node
+ * @net: the associated network namespace
+ * @namedq: queue to receive from
+ * @rcv_nxt: store last received seqno here
+ * @open: last bulk msg was received (FIXME)
*/
-void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_msg *msg;
+ struct tipc_net *tn = tipc_net(net);
struct distr_item *item;
- uint count;
- u32 node;
+ struct tipc_msg *hdr;
struct sk_buff *skb;
- int mtype;
+ u32 count, node;
spin_lock_bh(&tn->nametbl_lock);
- for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
- skb_linearize(skb);
- msg = buf_msg(skb);
- mtype = msg_type(msg);
- item = (struct distr_item *)msg_data(msg);
- count = msg_data_sz(msg) / ITEM_SIZE;
- node = msg_orignode(msg);
+ while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
+ hdr = buf_msg(skb);
+ node = msg_orignode(hdr);
+ item = (struct distr_item *)msg_data(hdr);
+ count = msg_data_sz(hdr) / ITEM_SIZE;
while (count--) {
- if (!tipc_update_nametbl(net, item, node, mtype))
- tipc_named_add_backlog(net, item, mtype, node);
+ tipc_update_nametbl(net, item, node, msg_type(hdr));
item++;
}
kfree_skb(skb);
- tipc_named_process_backlog(net);
}
spin_unlock_bh(&tn->nametbl_lock);
}
/**
* tipc_named_reinit - re-initialize local publications
+ * @net: the associated network namespace
*
* This routine is called whenever TIPC networking is enabled.
* All name table entries published by this node are updated to reflect
@@ -382,16 +395,17 @@ void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
*/
void tipc_named_reinit(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct publication *publ;
- int scope;
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *p;
+ u32 self = tipc_own_addr(net);
spin_lock_bh(&tn->nametbl_lock);
- for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
- list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
- local_list)
- publ->node = tn->own_addr;
-
+ list_for_each_entry_rcu(p, &nt->node_scope, binding_node)
+ p->sk.node = self;
+ list_for_each_entry_rcu(p, &nt->cluster_scope, binding_node)
+ p->sk.node = self;
+ nt->rc_dests = 0;
spin_unlock_bh(&tn->nametbl_lock);
}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 1264ba0af937..c677f6f082df 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -46,7 +46,7 @@
* @type: name sequence type
* @lower: name sequence lower bound
* @upper: name sequence upper bound
- * @ref: publishing port reference
+ * @port: publishing port reference
* @key: publication key
*
* ===> All fields are stored in network byte order. <===
@@ -63,16 +63,17 @@ struct distr_item {
__be32 type;
__be32 lower;
__be32 upper;
- __be32 ref;
+ __be32 port;
__be32 key;
};
struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
-void tipc_named_node_up(struct net *net, u32 dnode);
-void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities);
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+ u16 *rcv_nxt, bool *open);
void tipc_named_reinit(struct net *net);
-void tipc_named_process_backlog(struct net *net);
-void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+ u32 addr, u16 capabilities);
#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bd0aac87b41a..e74940eab3a4 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,8 +1,9 @@
/*
* net/tipc/name_table.c: TIPC name table code
*
- * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2018, Ericsson AB
* Copyright (c) 2004-2008, 2010-2014, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +36,8 @@
*/
#include <net/sock.h>
+#include <linux/list_sort.h>
+#include <linux/rbtree_augmented.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
@@ -43,860 +46,937 @@
#include "bcast.h"
#include "addr.h"
#include "node.h"
-#include <net/genetlink.h>
-
-#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
+#include "group.h"
/**
- * struct name_info - name sequence publication info
- * @node_list: circular list of publications made by own node
- * @cluster_list: circular list of publications made by own cluster
- * @zone_list: circular list of publications made by own zone
- * @node_list_size: number of entries in "node_list"
- * @cluster_list_size: number of entries in "cluster_list"
- * @zone_list_size: number of entries in "zone_list"
- *
- * Note: The zone list always contains at least one entry, since all
- * publications of the associated name sequence belong to it.
- * (The cluster and node lists may be empty.)
+ * struct service_range - container for all bindings of a service range
+ * @lower: service range lower bound
+ * @upper: service range upper bound
+ * @tree_node: member of service range RB tree
+ * @max: largest 'upper' in this node subtree
+ * @local_publ: list of identical publications made from this node
+ * Used by closest_first lookup and multicast lookup algorithm
+ * @all_publ: all publications identical to this one, whatever node and scope
+ * Used by round-robin lookup algorithm
*/
-struct name_info {
- struct list_head node_list;
- struct list_head cluster_list;
- struct list_head zone_list;
- u32 node_list_size;
- u32 cluster_list_size;
- u32 zone_list_size;
-};
-
-/**
- * struct sub_seq - container for all published instances of a name sequence
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
- * @info: pointer to name sequence publication info
- */
-struct sub_seq {
+struct service_range {
u32 lower;
u32 upper;
- struct name_info *info;
+ struct rb_node tree_node;
+ u32 max;
+ struct list_head local_publ;
+ struct list_head all_publ;
};
/**
- * struct name_seq - container for all published instances of a name type
- * @type: 32 bit 'type' value for name sequence
- * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
- * sub-sequences are sorted in ascending order
- * @alloc: number of sub-sequences currently in array
- * @first_free: array index of first unused sub-sequence entry
- * @ns_list: links to adjacent name sequences in hash chain
- * @subscriptions: list of subscriptions for this 'type'
- * @lock: spinlock controlling access to publication lists of all sub-sequences
+ * struct tipc_service - container for all published instances of a service type
+ * @type: 32 bit 'type' value for service
+ * @publ_cnt: increasing counter for publications in this service
+ * @ranges: rb tree containing all service ranges for this service
+ * @service_list: links to adjacent name ranges in hash chain
+ * @subscriptions: list of subscriptions for this service type
+ * @lock: spinlock controlling access to pertaining service ranges/publications
* @rcu: RCU callback head used for deferred freeing
*/
-struct name_seq {
+struct tipc_service {
u32 type;
- struct sub_seq *sseqs;
- u32 alloc;
- u32 first_free;
- struct hlist_node ns_list;
+ u32 publ_cnt;
+ struct rb_root ranges;
+ struct hlist_node service_list;
struct list_head subscriptions;
- spinlock_t lock;
+ spinlock_t lock; /* Covers service range list */
struct rcu_head rcu;
};
-static int hash(int x)
-{
- return x & (TIPC_NAMETBL_SIZE - 1);
-}
+#define service_range_upper(sr) ((sr)->upper)
+RB_DECLARE_CALLBACKS_MAX(static, sr_callbacks,
+ struct service_range, tree_node, u32, max,
+ service_range_upper)
-/**
- * publ_create - create a publication structure
- */
-static struct publication *publ_create(u32 type, u32 lower, u32 upper,
- u32 scope, u32 node, u32 port_ref,
- u32 key)
-{
- struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
- if (publ == NULL) {
- pr_warn("Publication creation failure, no memory\n");
- return NULL;
- }
+#define service_range_entry(rbtree_node) \
+ (container_of(rbtree_node, struct service_range, tree_node))
- publ->type = type;
- publ->lower = lower;
- publ->upper = upper;
- publ->scope = scope;
- publ->node = node;
- publ->ref = port_ref;
- publ->key = key;
- INIT_LIST_HEAD(&publ->pport_list);
- return publ;
-}
+#define service_range_overlap(sr, start, end) \
+ ((sr)->lower <= (end) && (sr)->upper >= (start))
/**
- * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
+ * service_range_foreach_match - iterate over tipc service rbtree for each
+ * range match
+ * @sr: the service range pointer as a loop cursor
+ * @sc: the pointer to tipc service which holds the service range rbtree
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*/
-static struct sub_seq *tipc_subseq_alloc(u32 cnt)
-{
- return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
-}
+#define service_range_foreach_match(sr, sc, start, end) \
+ for (sr = service_range_match_first((sc)->ranges.rb_node, \
+ start, \
+ end); \
+ sr; \
+ sr = service_range_match_next(&(sr)->tree_node, \
+ start, \
+ end))
/**
- * tipc_nameseq_create - create a name sequence structure for the specified 'type'
+ * service_range_match_first - find first service range matching a range
+ * @n: the root node of service range rbtree for searching
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*
- * Allocates a single sub-sequence structure and sets it to all 0's.
+ * Return: the leftmost service range node in the rbtree that overlaps the
+ * specified range, if any. Otherwise, returns NULL.
*/
-static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
+static struct service_range *service_range_match_first(struct rb_node *n,
+ u32 start, u32 end)
{
- struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
- struct sub_seq *sseq = tipc_subseq_alloc(1);
+ struct service_range *sr;
+ struct rb_node *l, *r;
- if (!nseq || !sseq) {
- pr_warn("Name sequence creation failed, no memory\n");
- kfree(nseq);
- kfree(sseq);
+	/* No overlaps in the tree at all? */
+ if (!n || service_range_entry(n)->max < start)
return NULL;
+
+ while (n) {
+ l = n->rb_left;
+ if (l && service_range_entry(l)->max >= start) {
+			/* If the left subtree can still hold an overlap
+			 * (its max >= start), the leftmost matching node
+			 * must be found there, so keep descending left.
+			 */
+ n = l;
+ continue;
+ }
+
+ /* No one in the left subtree can match, return if this node is
+ * an overlap i.e. leftmost.
+ */
+ sr = service_range_entry(n);
+ if (service_range_overlap(sr, start, end))
+ return sr;
+
+ /* Ok, try to lookup on the right side */
+ r = n->rb_right;
+ if (sr->lower <= end &&
+ r && service_range_entry(r)->max >= start) {
+ n = r;
+ continue;
+ }
+ break;
}
- spin_lock_init(&nseq->lock);
- nseq->type = type;
- nseq->sseqs = sseq;
- nseq->alloc = 1;
- INIT_HLIST_NODE(&nseq->ns_list);
- INIT_LIST_HEAD(&nseq->subscriptions);
- hlist_add_head_rcu(&nseq->ns_list, seq_head);
- return nseq;
+ return NULL;
}
/**
- * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
+ * service_range_match_next - find next service range matching a range
+ * @n: a node in service range rbtree from which the searching starts
+ * @start: beginning of the search range (end >= start) for matching
+ * @end: end of the search range (end >= start) for matching
*
- * Very time-critical, so binary searches through sub-sequence array.
+ * Return: the next service range node after the given node in the rbtree
+ * that overlaps the specified range, if any. Otherwise, returns NULL.
*/
-static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
- u32 instance)
+static struct service_range *service_range_match_next(struct rb_node *n,
+ u32 start, u32 end)
{
- struct sub_seq *sseqs = nseq->sseqs;
- int low = 0;
- int high = nseq->first_free - 1;
- int mid;
-
- while (low <= high) {
- mid = (low + high) / 2;
- if (instance < sseqs[mid].lower)
- high = mid - 1;
- else if (instance > sseqs[mid].upper)
- low = mid + 1;
- else
- return &sseqs[mid];
+ struct service_range *sr;
+ struct rb_node *p, *r;
+
+ while (n) {
+ r = n->rb_right;
+ if (r && service_range_entry(r)->max >= start)
+			/* The next overlapping node, if any, must lie in
+			 * the right subtree when that subtree can still
+			 * overlap (its max >= start); if it cannot, no
+			 * node within it can satisfy the condition.
+ */
+ return service_range_match_first(r, start, end);
+
+ /* No one in the right subtree can match, go up to find an
+ * ancestor of this node which is parent of a left-hand child.
+ */
+ while ((p = rb_parent(n)) && n == p->rb_right)
+ n = p;
+ if (!p)
+ break;
+
+ /* Return if this ancestor is an overlap */
+ sr = service_range_entry(p);
+ if (service_range_overlap(sr, start, end))
+ return sr;
+
+ /* Ok, try to lookup more from this ancestor */
+ if (sr->lower <= end) {
+ n = p;
+ continue;
+ }
+ break;
}
+
return NULL;
}
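
Both lookups above prune on the cached 'max' value: a subtree can only
contain an overlap if its maximum upper bound reaches the start of the
search range. A minimal, non-balanced userspace sketch of the same
first-match walk, with the RB_DECLARE_CALLBACKS_MAX machinery replaced by a
hand-maintained max field:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
            uint32_t lower, upper, max;    /* max = largest upper in subtree */
            struct node *left, *right;
    };

    static struct node *match_first(struct node *n, uint32_t start, uint32_t end)
    {
            if (!n || n->max < start)
                    return NULL;                   /* nothing here can overlap */
            while (n) {
                    if (n->left && n->left->max >= start) {
                            n = n->left;           /* leftmost match is left */
                            continue;
                    }
                    if (n->lower <= end && n->upper >= start)
                            return n;              /* this node overlaps */
                    if (n->lower <= end &&
                        n->right && n->right->max >= start) {
                            n = n->right;          /* try the right subtree */
                            continue;
                    }
                    break;
            }
            return NULL;
    }

    int main(void)
    {
            struct node b = { 20, 30, 30, NULL, NULL };
            struct node a = { 0, 5, 30, NULL, &b };     /* root covers subtree */
            struct node *hit = match_first(&a, 25, 26);

            if (hit)
                    printf("%u-%u\n", hit->lower, hit->upper);  /* 20-30 */
            return 0;
    }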
-/**
- * nameseq_locate_subseq - determine position of name instance in sub-sequence
- *
- * Returns index in sub-sequence array of the entry that contains the specified
- * instance value; if no entry contains that value, returns the position
- * where a new entry for it would be inserted in the array.
- *
- * Note: Similar to binary search code for locating a sub-sequence.
- */
-static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
+static int hash(int x)
{
- struct sub_seq *sseqs = nseq->sseqs;
- int low = 0;
- int high = nseq->first_free - 1;
- int mid;
-
- while (low <= high) {
- mid = (low + high) / 2;
- if (instance < sseqs[mid].lower)
- high = mid - 1;
- else if (instance > sseqs[mid].upper)
- low = mid + 1;
- else
- return mid;
- }
- return low;
+ return x & (TIPC_NAMETBL_SIZE - 1);
}
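
hash() above depends on TIPC_NAMETBL_SIZE being a power of two, which lets
the AND mask stand in for a modulo: with TIPC_NAMETBL_SIZE = 1024 the mask
is 1023 (0x3ff), so a service type of, say, 18888 lands in bucket
18888 & 1023 = 456.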
/**
- * tipc_nameseq_insert_publ
+ * tipc_publ_create - create a publication structure
+ * @ua: the service range the user is binding to
+ * @sk: the address of the socket that is bound
+ * @key: publication key
*/
-static struct publication *tipc_nameseq_insert_publ(struct net *net,
- struct name_seq *nseq,
- u32 type, u32 lower,
- u32 upper, u32 scope,
- u32 node, u32 port, u32 key)
+static struct publication *tipc_publ_create(struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key)
{
- struct tipc_subscription *s;
- struct tipc_subscription *st;
- struct publication *publ;
- struct sub_seq *sseq;
- struct name_info *info;
- int created_subseq = 0;
-
- sseq = nameseq_find_subseq(nseq, lower);
- if (sseq) {
-
- /* Lower end overlaps existing entry => need an exact match */
- if ((sseq->lower != lower) || (sseq->upper != upper)) {
- return NULL;
- }
-
- info = sseq->info;
+ struct publication *p = kzalloc(sizeof(*p), GFP_ATOMIC);
- /* Check if an identical publication already exists */
- list_for_each_entry(publ, &info->zone_list, zone_list) {
- if ((publ->ref == port) && (publ->key == key) &&
- (!publ->node || (publ->node == node)))
- return NULL;
- }
- } else {
- u32 inspos;
- struct sub_seq *freesseq;
+ if (!p)
+ return NULL;
- /* Find where lower end should be inserted */
- inspos = nameseq_locate_subseq(nseq, lower);
+ p->sr = ua->sr;
+ p->sk = *sk;
+ p->scope = ua->scope;
+ p->key = key;
+ INIT_LIST_HEAD(&p->binding_sock);
+ INIT_LIST_HEAD(&p->binding_node);
+ INIT_LIST_HEAD(&p->local_publ);
+ INIT_LIST_HEAD(&p->all_publ);
+ INIT_LIST_HEAD(&p->list);
+ return p;
+}
- /* Fail if upper end overlaps into an existing entry */
- if ((inspos < nseq->first_free) &&
- (upper >= nseq->sseqs[inspos].lower)) {
- return NULL;
- }
+/**
+ * tipc_service_create - create a service structure for the specified 'type'
+ * @net: network namespace
+ * @ua: address representing the service to be bound
+ *
+ * Allocates a single service structure and sets it to all 0's.
+ */
+static struct tipc_service *tipc_service_create(struct net *net,
+ struct tipc_uaddr *ua)
+{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_service *service;
+ struct hlist_head *hd;
- /* Ensure there is space for new sub-sequence */
- if (nseq->first_free == nseq->alloc) {
- struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
+ service = kzalloc(sizeof(*service), GFP_ATOMIC);
+ if (!service) {
+ pr_warn("Service creation failed, no memory\n");
+ return NULL;
+ }
- if (!sseqs) {
- pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
- return NULL;
- }
- memcpy(sseqs, nseq->sseqs,
- nseq->alloc * sizeof(struct sub_seq));
- kfree(nseq->sseqs);
- nseq->sseqs = sseqs;
- nseq->alloc *= 2;
- }
+ spin_lock_init(&service->lock);
+ service->type = ua->sr.type;
+ service->ranges = RB_ROOT;
+ INIT_HLIST_NODE(&service->service_list);
+ INIT_LIST_HEAD(&service->subscriptions);
+ hd = &nt->services[hash(ua->sr.type)];
+ hlist_add_head_rcu(&service->service_list, hd);
+ return service;
+}
- info = kzalloc(sizeof(*info), GFP_ATOMIC);
- if (!info) {
- pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
- return NULL;
- }
+/* tipc_service_find_range - find service range matching publication parameters
+ */
+static struct service_range *tipc_service_find_range(struct tipc_service *sc,
+ struct tipc_uaddr *ua)
+{
+ struct service_range *sr;
- INIT_LIST_HEAD(&info->node_list);
- INIT_LIST_HEAD(&info->cluster_list);
- INIT_LIST_HEAD(&info->zone_list);
-
- /* Insert new sub-sequence */
- sseq = &nseq->sseqs[inspos];
- freesseq = &nseq->sseqs[nseq->first_free];
- memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
- memset(sseq, 0, sizeof(*sseq));
- nseq->first_free++;
- sseq->lower = lower;
- sseq->upper = upper;
- sseq->info = info;
- created_subseq = 1;
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ /* Look for exact match */
+ if (sr->lower == ua->sr.lower && sr->upper == ua->sr.upper)
+ return sr;
}
- /* Insert a publication */
- publ = publ_create(type, lower, upper, scope, node, port, key);
- if (!publ)
- return NULL;
-
- list_add(&publ->zone_list, &info->zone_list);
- info->zone_list_size++;
+ return NULL;
+}
- if (in_own_cluster(net, node)) {
- list_add(&publ->cluster_list, &info->cluster_list);
- info->cluster_list_size++;
+static struct service_range *tipc_service_create_range(struct tipc_service *sc,
+ struct publication *p)
+{
+ struct rb_node **n, *parent = NULL;
+ struct service_range *sr;
+ u32 lower = p->sr.lower;
+ u32 upper = p->sr.upper;
+
+ n = &sc->ranges.rb_node;
+ while (*n) {
+ parent = *n;
+ sr = service_range_entry(parent);
+ if (lower == sr->lower && upper == sr->upper)
+ return sr;
+ if (sr->max < upper)
+ sr->max = upper;
+ if (lower <= sr->lower)
+ n = &parent->rb_left;
+ else
+ n = &parent->rb_right;
}
+ sr = kzalloc(sizeof(*sr), GFP_ATOMIC);
+ if (!sr)
+ return NULL;
+ sr->lower = lower;
+ sr->upper = upper;
+ sr->max = upper;
+ INIT_LIST_HEAD(&sr->local_publ);
+ INIT_LIST_HEAD(&sr->all_publ);
+ rb_link_node(&sr->tree_node, parent, n);
+ rb_insert_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ return sr;
+}
- if (in_own_node(net, node)) {
- list_add(&publ->node_list, &info->node_list);
- info->node_list_size++;
+static bool tipc_service_insert_publ(struct net *net,
+ struct tipc_service *sc,
+ struct publication *p)
+{
+ struct tipc_subscription *sub, *tmp;
+ struct service_range *sr;
+ struct publication *_p;
+ u32 node = p->sk.node;
+ bool first = false;
+ bool res = false;
+ u32 key = p->key;
+
+ spin_lock_bh(&sc->lock);
+ sr = tipc_service_create_range(sc, p);
+ if (!sr)
+ goto exit;
+
+ first = list_empty(&sr->all_publ);
+
+ /* Return if the publication already exists */
+ list_for_each_entry(_p, &sr->all_publ, all_publ) {
+ if (_p->key == key && (!_p->sk.node || _p->sk.node == node)) {
+ pr_debug("Failed to bind duplicate %u,%u,%u/%u:%u/%u\n",
+ p->sr.type, p->sr.lower, p->sr.upper,
+ node, p->sk.ref, key);
+ goto exit;
+ }
}
+ if (in_own_node(net, p->sk.node))
+ list_add(&p->local_publ, &sr->local_publ);
+ list_add(&p->all_publ, &sr->all_publ);
+ p->id = sc->publ_cnt++;
+
/* Any subscriptions waiting for notification? */
- list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
- TIPC_PUBLISHED, publ->ref,
- publ->node, created_subseq);
+ list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
+ tipc_sub_report_overlap(sub, p, TIPC_PUBLISHED, first);
}
- return publ;
+ res = true;
+exit:
+ if (!res)
+ pr_warn("Failed to bind to %u,%u,%u\n",
+ p->sr.type, p->sr.lower, p->sr.upper);
+ spin_unlock_bh(&sc->lock);
+ return res;
}
/**
- * tipc_nameseq_remove_publ
- *
- * NOTE: There may be cases where TIPC is asked to remove a publication
- * that is not in the name table. For example, if another node issues a
- * publication for a name sequence that overlaps an existing name sequence
- * the publication will not be recorded, which means the publication won't
- * be found when the name sequence is later withdrawn by that node.
- * A failed withdraw request simply returns a failure indication and lets the
- * caller issue any error or warning messages associated with such a problem.
+ * tipc_service_remove_publ - remove a publication from a service
+ * @r: service_range to remove publication from
+ * @sk: address publishing socket
+ * @key: target publication key
*/
-static struct publication *tipc_nameseq_remove_publ(struct net *net,
- struct name_seq *nseq,
- u32 inst, u32 node,
- u32 ref, u32 key)
+static struct publication *tipc_service_remove_publ(struct service_range *r,
+ struct tipc_socket_addr *sk,
+ u32 key)
{
- struct publication *publ;
- struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
- struct name_info *info;
- struct sub_seq *free;
- struct tipc_subscription *s, *st;
- int removed_subseq = 0;
-
- if (!sseq)
- return NULL;
-
- info = sseq->info;
+ struct publication *p;
+ u32 node = sk->node;
- /* Locate publication, if it exists */
- list_for_each_entry(publ, &info->zone_list, zone_list) {
- if ((publ->key == key) && (publ->ref == ref) &&
- (!publ->node || (publ->node == node)))
- goto found;
+ list_for_each_entry(p, &r->all_publ, all_publ) {
+ if (p->key != key || (node && node != p->sk.node))
+ continue;
+ list_del(&p->all_publ);
+ list_del(&p->local_publ);
+ return p;
}
return NULL;
+}
-found:
- /* Remove publication from zone scope list */
- list_del(&publ->zone_list);
- info->zone_list_size--;
-
- /* Remove publication from cluster scope list, if present */
- if (in_own_cluster(net, node)) {
- list_del(&publ->cluster_list);
- info->cluster_list_size--;
- }
-
- /* Remove publication from node scope list, if present */
- if (in_own_node(net, node)) {
- list_del(&publ->node_list);
- info->node_list_size--;
- }
-
- /* Contract subseq list if no more publications for that subseq */
- if (list_empty(&info->zone_list)) {
- kfree(info);
- free = &nseq->sseqs[nseq->first_free--];
- memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
- removed_subseq = 1;
- }
-
- /* Notify any waiting subscriptions */
- list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
- tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
- TIPC_WITHDRAWN, publ->ref,
- publ->node, removed_subseq);
- }
+/*
+ * Ordering reuses time_after32(): publication ids compare correctly
+ * even across u32 counter wraparound
+ */
+#define publication_after(pa, pb) time_after32((pa)->id, (pb)->id)
+static int tipc_publ_sort(void *priv, const struct list_head *a,
+ const struct list_head *b)
+{
+ struct publication *pa, *pb;
- return publ;
+ pa = container_of(a, struct publication, list);
+ pb = container_of(b, struct publication, list);
+ return publication_after(pa, pb);
}
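
Publication ids come from the free-running publ_cnt counter, so
publication_after() borrows time_after32()'s signed-difference trick to keep
the ordering valid across u32 wraparound (assuming two live ids never
diverge by 2^31 or more). A userspace sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-in for time_after32(): 'a' is after 'b' iff the signed
     * 32-bit difference is positive
     */
    static int after32(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) > 0;
    }

    int main(void)
    {
            printf("%d\n", after32(5, 3));               /* 1 */
            printf("%d\n", after32(2, UINT32_MAX - 1));  /* 1, despite wrap */
            return 0;
    }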
/**
- * tipc_nameseq_subscribe - attach a subscription, and issue
- * the prescribed number of events if there is any sub-
- * sequence overlapping with the requested sequence
+ * tipc_service_subscribe - attach a subscription, and optionally
+ * issue the prescribed number of events if there is any service
+ * range overlapping with the requested range
+ * @service: the tipc_service to attach the @sub to
+ * @sub: the subscription to attach
*/
-static void tipc_nameseq_subscribe(struct name_seq *nseq,
- struct tipc_subscription *s)
+static void tipc_service_subscribe(struct tipc_service *service,
+ struct tipc_subscription *sub)
{
- struct sub_seq *sseq = nseq->sseqs;
- struct tipc_name_seq ns;
+ struct publication *p, *first, *tmp;
+ struct list_head publ_list;
+ struct service_range *sr;
+ u32 filter, lower, upper;
- tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
+ filter = sub->s.filter;
+ lower = sub->s.seq.lower;
+ upper = sub->s.seq.upper;
- tipc_subscrp_get(s);
- list_add(&s->nameseq_list, &nseq->subscriptions);
+ tipc_sub_get(sub);
+ list_add(&sub->service_list, &service->subscriptions);
- if (!sseq)
+ if (filter & TIPC_SUB_NO_STATUS)
return;
- while (sseq != &nseq->sseqs[nseq->first_free]) {
- if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
- struct publication *crs;
- struct name_info *info = sseq->info;
- int must_report = 1;
-
- list_for_each_entry(crs, &info->zone_list, zone_list) {
- tipc_subscrp_report_overlap(s, sseq->lower,
- sseq->upper,
- TIPC_PUBLISHED,
- crs->ref, crs->node,
- must_report);
- must_report = 0;
- }
+ INIT_LIST_HEAD(&publ_list);
+ service_range_foreach_match(sr, service, lower, upper) {
+ first = NULL;
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (filter & TIPC_SUB_PORTS)
+ list_add_tail(&p->list, &publ_list);
+ else if (!first || publication_after(first, p))
+ /* Pick this range's *first* publication */
+ first = p;
}
- sseq++;
+ if (first)
+ list_add_tail(&first->list, &publ_list);
+ }
+
+ /* Sort the publications before reporting */
+ list_sort(NULL, &publ_list, tipc_publ_sort);
+ list_for_each_entry_safe(p, tmp, &publ_list, list) {
+ tipc_sub_report_overlap(sub, p, TIPC_PUBLISHED, true);
+ list_del_init(&p->list);
}
}
-static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
+static struct tipc_service *tipc_service_find(struct net *net,
+ struct tipc_uaddr *ua)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct hlist_head *seq_head;
- struct name_seq *ns;
-
- seq_head = &tn->nametbl->seq_hlist[hash(type)];
- hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
- if (ns->type == type)
- return ns;
+ struct name_table *nt = tipc_name_table(net);
+ struct hlist_head *service_head;
+ struct tipc_service *service;
+
+ service_head = &nt->services[hash(ua->sr.type)];
+ hlist_for_each_entry_rcu(service, service_head, service_list) {
+ if (service->type == ua->sr.type)
+ return service;
}
-
return NULL;
};
-struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
- u32 lower, u32 upper, u32 scope,
- u32 node, u32 port, u32 key)
+struct publication *tipc_nametbl_insert_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct publication *publ;
- struct name_seq *seq = nametbl_find_seq(net, type);
- int index = hash(type);
-
- if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
- (lower > upper)) {
- pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
- type, lower, upper, scope);
- return NULL;
- }
+ struct tipc_service *sc;
+ struct publication *p;
- if (!seq)
- seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
- if (!seq)
+ p = tipc_publ_create(ua, sk, key);
+ if (!p)
return NULL;
- spin_lock_bh(&seq->lock);
- publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
- scope, node, port, key);
- spin_unlock_bh(&seq->lock);
- return publ;
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ sc = tipc_service_create(net, ua);
+ if (sc && tipc_service_insert_publ(net, sc, p))
+ return p;
+ kfree(p);
+ return NULL;
}
-struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
- u32 lower, u32 node, u32 ref,
+struct publication *tipc_nametbl_remove_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
u32 key)
{
- struct publication *publ;
- struct name_seq *seq = nametbl_find_seq(net, type);
+ struct tipc_subscription *sub, *tmp;
+ struct publication *p = NULL;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ bool last;
+
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
- if (!seq)
- return NULL;
+ spin_lock_bh(&sc->lock);
+ sr = tipc_service_find_range(sc, ua);
+ if (!sr)
+ goto unlock;
+ p = tipc_service_remove_publ(sr, sk, key);
+ if (!p)
+ goto unlock;
- spin_lock_bh(&seq->lock);
- publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
- if (!seq->first_free && list_empty(&seq->subscriptions)) {
- hlist_del_init_rcu(&seq->ns_list);
- kfree(seq->sseqs);
- spin_unlock_bh(&seq->lock);
- kfree_rcu(seq, rcu);
- return publ;
+ /* Notify any waiting subscriptions */
+ last = list_empty(&sr->all_publ);
+ list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) {
+ tipc_sub_report_overlap(sub, p, TIPC_WITHDRAWN, last);
+ }
+
+ /* Remove service range item if this was its last publication */
+ if (list_empty(&sr->all_publ)) {
+ rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ kfree(sr);
+ }
+
+ /* Delete service item if no more publications and subscriptions */
+ if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
+ hlist_del_init_rcu(&sc->service_list);
+ kfree_rcu(sc, rcu);
+ }
+unlock:
+ spin_unlock_bh(&sc->lock);
+exit:
+ if (!p) {
+ pr_err("Failed to remove unknown binding: %u,%u,%u/%u:%u/%u\n",
+ ua->sr.type, ua->sr.lower, ua->sr.upper,
+ sk->node, sk->ref, key);
}
- spin_unlock_bh(&seq->lock);
- return publ;
+ return p;
}
/**
- * tipc_nametbl_translate - perform name translation
+ * tipc_nametbl_lookup_anycast - perform service instance to socket translation
+ * @net: network namespace
+ * @ua: service address to look up
+ * @sk: address to socket we want to find
*
- * On entry, 'destnode' is the search domain used during translation.
+ * On entry, a non-zero 'sk->node' indicates the node where we want lookup to be
+ * performed, which may not be this one.
*
* On exit:
- * - if name translation is deferred to another node/cluster/zone,
- * leaves 'destnode' unchanged (will be non-zero) and returns 0
- * - if name translation is attempted and succeeds, sets 'destnode'
- * to publishing node and returns port reference (will be non-zero)
- * - if name translation is attempted and fails, sets 'destnode' to 0
- * and returns 0
+ *
+ * - If lookup is deferred to another node, leave 'sk->node' unchanged and
+ * return 'true'.
+ * - If lookup is successful, set the 'sk->node' and 'sk->ref' (== portid) which
+ * represent the bound socket and return 'true'.
+ * - If lookup fails, return 'false'
+ *
+ * Note that for legacy users (node configured with Z.C.N address format) the
+ * 'closest-first' lookup algorithm must be maintained, i.e., if sk.node is 0
+ * we must look in the local binding list first
*/
-u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
- u32 *destnode)
+bool tipc_nametbl_lookup_anycast(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sub_seq *sseq;
- struct name_info *info;
- struct publication *publ;
- struct name_seq *seq;
- u32 ref = 0;
- u32 node = 0;
-
- if (!tipc_in_scope(*destnode, tn->own_addr))
- return 0;
+ struct tipc_net *tn = tipc_net(net);
+ bool legacy = tn->legacy_addr_format;
+ u32 self = tipc_own_addr(net);
+ u32 inst = ua->sa.instance;
+ struct service_range *r;
+ struct tipc_service *sc;
+ struct publication *p;
+ struct list_head *l;
+ bool res = false;
+
+ if (!tipc_in_scope(legacy, sk->node, self))
+ return true;
rcu_read_lock();
- seq = nametbl_find_seq(net, type);
- if (unlikely(!seq))
- goto not_found;
- spin_lock_bh(&seq->lock);
- sseq = nameseq_find_subseq(seq, instance);
- if (unlikely(!sseq))
- goto no_match;
- info = sseq->info;
-
- /* Closest-First Algorithm */
- if (likely(!*destnode)) {
- if (!list_empty(&info->node_list)) {
- publ = list_first_entry(&info->node_list,
- struct publication,
- node_list);
- list_move_tail(&publ->node_list,
- &info->node_list);
- } else if (!list_empty(&info->cluster_list)) {
- publ = list_first_entry(&info->cluster_list,
- struct publication,
- cluster_list);
- list_move_tail(&publ->cluster_list,
- &info->cluster_list);
+ sc = tipc_service_find(net, ua);
+ if (unlikely(!sc))
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(r, sc, inst, inst) {
+ /* Select lookup algo: local, closest-first or round-robin */
+ if (sk->node == self) {
+ l = &r->local_publ;
+ if (list_empty(l))
+ continue;
+ p = list_first_entry(l, struct publication, local_publ);
+ list_move_tail(&p->local_publ, &r->local_publ);
+ } else if (legacy && !sk->node && !list_empty(&r->local_publ)) {
+ l = &r->local_publ;
+ p = list_first_entry(l, struct publication, local_publ);
+ list_move_tail(&p->local_publ, &r->local_publ);
} else {
- publ = list_first_entry(&info->zone_list,
- struct publication,
- zone_list);
- list_move_tail(&publ->zone_list,
- &info->zone_list);
+ l = &r->all_publ;
+ p = list_first_entry(l, struct publication, all_publ);
+ list_move_tail(&p->all_publ, &r->all_publ);
}
+ *sk = p->sk;
+ res = true;
+		/* Todo: as for legacy, only the first matching range is
+		 * picked for now; a "true" round-robin across ranges can
+		 * be added when needed.
+ */
+ break;
}
+ spin_unlock_bh(&sc->lock);
- /* Round-Robin Algorithm */
- else if (*destnode == tn->own_addr) {
- if (list_empty(&info->node_list))
- goto no_match;
- publ = list_first_entry(&info->node_list, struct publication,
- node_list);
- list_move_tail(&publ->node_list, &info->node_list);
- } else if (in_own_cluster_exact(net, *destnode)) {
- if (list_empty(&info->cluster_list))
- goto no_match;
- publ = list_first_entry(&info->cluster_list, struct publication,
- cluster_list);
- list_move_tail(&publ->cluster_list, &info->cluster_list);
- } else {
- publ = list_first_entry(&info->zone_list, struct publication,
- zone_list);
- list_move_tail(&publ->zone_list, &info->zone_list);
- }
+exit:
+ rcu_read_unlock();
+ return res;
+}
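
The round-robin branch above works by list rotation: list_first_entry()
picks the current head and list_move_tail() sends it to the back, so
repeated lookups cycle through all matching publications. A toy stand-in
for that rotation, using an index over an array instead of list.h:

    #include <stdio.h>

    int main(void)
    {
            int port_ref[3] = { 101, 102, 103 };   /* made-up socket refs */
            int head = 0, i;

            for (i = 0; i < 5; i++) {
                    printf("lookup %d -> %d\n", i, port_ref[head]);
                    head = (head + 1) % 3;         /* 'move to tail' */
            }
            return 0;
    }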
+
+/* tipc_nametbl_lookup_group(): look up destination(s) in a communication group
+ * Returns a list of one (== group anycast) or more (== group multicast)
+ * destination socket/node pairs matching the given address.
+ * The requester may or may not want to exclude itself from the list.
+ */
+bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dsts, int *dstcnt,
+ u32 exclude, bool mcast)
+{
+ u32 self = tipc_own_addr(net);
+ u32 inst = ua->sa.instance;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
- ref = publ->ref;
- node = publ->node;
+ *dstcnt = 0;
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (unlikely(!sc))
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+
+ /* Todo: a full search i.e. service_range_foreach_match() instead? */
+ sr = service_range_match_first(sc->ranges.rb_node, inst, inst);
+ if (!sr)
+ goto no_match;
+
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (p->scope != ua->scope)
+ continue;
+ if (p->sk.ref == exclude && p->sk.node == self)
+ continue;
+ tipc_dest_push(dsts, p->sk.node, p->sk.ref);
+ (*dstcnt)++;
+ if (mcast)
+ continue;
+ list_move_tail(&p->all_publ, &sr->all_publ);
+ break;
+ }
no_match:
- spin_unlock_bh(&seq->lock);
-not_found:
+ spin_unlock_bh(&sc->lock);
+exit:
rcu_read_unlock();
- *destnode = node;
- return ref;
+ return !list_empty(dsts);
}
-/**
- * tipc_nametbl_mc_translate - find multicast destinations
- *
- * Creates list of all local ports that overlap the given multicast address;
- * also determines if any off-node ports overlap.
- *
- * Note: Publications with a scope narrower than 'limit' are ignored.
- * (i.e. local node-scope publications mustn't receive messages arriving
- * from another node, even if the multcast link brought it here)
- *
- * Returns non-zero if any off-node ports overlap
+/* tipc_nametbl_lookup_mcast_sockets(): look up node-local destination sockets
+ * matching the given address
+ * Used on nodes which have received a multicast/broadcast message
+ * Returns a list of local sockets
*/
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
- u32 limit, struct list_head *dports)
+void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dports)
{
- struct name_seq *seq;
- struct sub_seq *sseq;
- struct sub_seq *sseq_stop;
- struct name_info *info;
- int res = 0;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+ u8 scope = ua->scope;
rcu_read_lock();
- seq = nametbl_find_seq(net, type);
- if (!seq)
+ sc = tipc_service_find(net, ua);
+ if (!sc)
goto exit;
- spin_lock_bh(&seq->lock);
- sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
- sseq_stop = seq->sseqs + seq->first_free;
- for (; sseq != sseq_stop; sseq++) {
- struct publication *publ;
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ list_for_each_entry(p, &sr->local_publ, local_publ) {
+ if (scope == p->scope || scope == TIPC_ANY_SCOPE)
+ tipc_dest_push(dports, 0, p->sk.ref);
+ }
+ }
+ spin_unlock_bh(&sc->lock);
+exit:
+ rcu_read_unlock();
+}
- if (sseq->lower > upper)
- break;
+/* tipc_nametbl_lookup_mcast_nodes(): look up all destination nodes matching
+ * the given address. Used on nodes which are sending out a
+ * multicast/broadcast message.
+ * Returns a list of nodes, including own node if applicable
+ */
+void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_nlist *nodes)
+{
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
- info = sseq->info;
- list_for_each_entry(publ, &info->node_list, node_list) {
- if (publ->scope <= limit)
- u32_push(dports, publ->ref);
- }
+ rcu_read_lock();
+ sc = tipc_service_find(net, ua);
+ if (!sc)
+ goto exit;
- if (info->cluster_list_size != info->node_list_size)
- res = 1;
+ spin_lock_bh(&sc->lock);
+ service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) {
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ tipc_nlist_add(nodes, p->sk.node);
+ }
}
- spin_unlock_bh(&seq->lock);
+ spin_unlock_bh(&sc->lock);
exit:
rcu_read_unlock();
- return res;
}
-/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
- * - Creates list of nodes that overlap the given multicast address
- * - Determines if any node local ports overlap
+/* tipc_nametbl_build_group - build list of communication group members
*/
-void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
- u32 upper, u32 domain,
- struct tipc_nlist *nodes)
+void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
+ struct tipc_uaddr *ua)
{
- struct sub_seq *sseq, *stop;
- struct publication *publ;
- struct name_info *info;
- struct name_seq *seq;
+ struct service_range *sr;
+ struct tipc_service *sc;
+ struct publication *p;
+ struct rb_node *n;
rcu_read_lock();
- seq = nametbl_find_seq(net, type);
- if (!seq)
+ sc = tipc_service_find(net, ua);
+ if (!sc)
goto exit;
- spin_lock_bh(&seq->lock);
- sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
- stop = seq->sseqs + seq->first_free;
- for (; sseq->lower <= upper && sseq != stop; sseq++) {
- info = sseq->info;
- list_for_each_entry(publ, &info->zone_list, zone_list) {
- if (tipc_in_scope(domain, publ->node))
- tipc_nlist_add(nodes, publ->node);
+ spin_lock_bh(&sc->lock);
+ for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
+ sr = container_of(n, struct service_range, tree_node);
+ list_for_each_entry(p, &sr->all_publ, all_publ) {
+ if (p->scope != ua->scope)
+ continue;
+ tipc_group_add_member(grp, p->sk.node, p->sk.ref,
+ p->sr.lower);
}
}
- spin_unlock_bh(&seq->lock);
+ spin_unlock_bh(&sc->lock);
exit:
rcu_read_unlock();
}
-/*
- * tipc_nametbl_publish - add name publication to network name tables
+/* tipc_nametbl_publish - add service binding to name table
*/
-struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
- u32 upper, u32 scope, u32 port_ref,
- u32 key)
+struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key)
{
- struct publication *publ;
- struct sk_buff *buf = NULL;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct publication *p = NULL;
+ struct sk_buff *skb = NULL;
+ u32 rc_dests;
spin_lock_bh(&tn->nametbl_lock);
- if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
- pr_warn("Publication failed, local publication limit reached (%u)\n",
- TIPC_MAX_PUBLICATIONS);
- spin_unlock_bh(&tn->nametbl_lock);
- return NULL;
+
+ if (nt->local_publ_count >= TIPC_MAX_PUBL) {
+ pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL);
+ goto exit;
}
- publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
- tn->own_addr, port_ref, key);
- if (likely(publ)) {
- tn->nametbl->local_publ_count++;
- buf = tipc_named_publish(net, publ);
- /* Any pending external events? */
- tipc_named_process_backlog(net);
+ p = tipc_nametbl_insert_publ(net, ua, sk, key);
+ if (p) {
+ nt->local_publ_count++;
+ skb = tipc_named_publish(net, p);
}
+ rc_dests = nt->rc_dests;
+exit:
spin_unlock_bh(&tn->nametbl_lock);
- if (buf)
- tipc_node_broadcast(net, buf);
- return publ;
+ if (skb)
+ tipc_node_broadcast(net, skb, rc_dests);
+ return p;
+
}
/**
- * tipc_nametbl_withdraw - withdraw name publication from network name tables
+ * tipc_nametbl_withdraw - withdraw a service binding
+ * @net: network namespace
+ * @ua: service address/range being unbound
+ * @sk: address of the socket being unbound from
+ * @key: target publication key
*/
-int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
- u32 key)
+void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key)
{
- struct publication *publ;
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
struct sk_buff *skb = NULL;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct publication *p;
+ u32 rc_dests;
spin_lock_bh(&tn->nametbl_lock);
- publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
- ref, key);
- if (likely(publ)) {
- tn->nametbl->local_publ_count--;
- skb = tipc_named_withdraw(net, publ);
- /* Any pending external events? */
- tipc_named_process_backlog(net);
- list_del_init(&publ->pport_list);
- kfree_rcu(publ, rcu);
- } else {
- pr_err("Unable to remove local publication\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- type, lower, ref, key);
+
+ p = tipc_nametbl_remove_publ(net, ua, sk, key);
+ if (p) {
+ nt->local_publ_count--;
+ skb = tipc_named_withdraw(net, p);
+ list_del_init(&p->binding_sock);
+ kfree_rcu(p, rcu);
}
+ rc_dests = nt->rc_dests;
spin_unlock_bh(&tn->nametbl_lock);
- if (skb) {
- tipc_node_broadcast(net, skb);
- return 1;
- }
- return 0;
+ if (skb)
+ tipc_node_broadcast(net, skb, rc_dests);
}
/**
* tipc_nametbl_subscribe - add a subscription object to the name table
+ * @sub: subscription to add
*/
-void tipc_nametbl_subscribe(struct tipc_subscription *s)
+bool tipc_nametbl_subscribe(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(s->net, tipc_net_id);
- u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
- int index = hash(type);
- struct name_seq *seq;
- struct tipc_name_seq ns;
-
+ struct tipc_net *tn = tipc_net(sub->net);
+ u32 type = sub->s.seq.type;
+ struct tipc_service *sc;
+ struct tipc_uaddr ua;
+ bool res = true;
+
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE, type,
+ sub->s.seq.lower, sub->s.seq.upper);
spin_lock_bh(&tn->nametbl_lock);
- seq = nametbl_find_seq(s->net, type);
- if (!seq)
- seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
- if (seq) {
- spin_lock_bh(&seq->lock);
- tipc_nameseq_subscribe(seq, s);
- spin_unlock_bh(&seq->lock);
+ sc = tipc_service_find(sub->net, &ua);
+ if (!sc)
+ sc = tipc_service_create(sub->net, &ua);
+ if (sc) {
+ spin_lock_bh(&sc->lock);
+ tipc_service_subscribe(sc, sub);
+ spin_unlock_bh(&sc->lock);
} else {
- tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
- pr_warn("Failed to create subscription for {%u,%u,%u}\n",
- ns.type, ns.lower, ns.upper);
+ pr_warn("Failed to subscribe for {%u,%u,%u}\n",
+ type, sub->s.seq.lower, sub->s.seq.upper);
+ res = false;
}
spin_unlock_bh(&tn->nametbl_lock);
+ return res;
}
/**
* tipc_nametbl_unsubscribe - remove a subscription object from name table
+ * @sub: subscription to remove
*/
-void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
+void tipc_nametbl_unsubscribe(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(s->net, tipc_net_id);
- struct name_seq *seq;
- u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
+ struct tipc_net *tn = tipc_net(sub->net);
+ struct tipc_service *sc;
+ struct tipc_uaddr ua;
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ sub->s.seq.type, sub->s.seq.lower, sub->s.seq.upper);
spin_lock_bh(&tn->nametbl_lock);
- seq = nametbl_find_seq(s->net, type);
- if (seq != NULL) {
- spin_lock_bh(&seq->lock);
- list_del_init(&s->nameseq_list);
- tipc_subscrp_put(s);
- if (!seq->first_free && list_empty(&seq->subscriptions)) {
- hlist_del_init_rcu(&seq->ns_list);
- kfree(seq->sseqs);
- spin_unlock_bh(&seq->lock);
- kfree_rcu(seq, rcu);
- } else {
- spin_unlock_bh(&seq->lock);
- }
+ sc = tipc_service_find(sub->net, &ua);
+ if (!sc)
+ goto exit;
+
+ spin_lock_bh(&sc->lock);
+ list_del_init(&sub->service_list);
+ tipc_sub_put(sub);
+
+ /* Delete service item if no more publications and subscriptions */
+ if (RB_EMPTY_ROOT(&sc->ranges) && list_empty(&sc->subscriptions)) {
+ hlist_del_init_rcu(&sc->service_list);
+ kfree_rcu(sc, rcu);
}
+ spin_unlock_bh(&sc->lock);
+exit:
spin_unlock_bh(&tn->nametbl_lock);
}
int tipc_nametbl_init(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct name_table *tipc_nametbl;
+ struct tipc_net *tn = tipc_net(net);
+ struct name_table *nt;
int i;
- tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
- if (!tipc_nametbl)
+ nt = kzalloc(sizeof(*nt), GFP_KERNEL);
+ if (!nt)
return -ENOMEM;
for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
- INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);
+ INIT_HLIST_HEAD(&nt->services[i]);
- INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
- INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
- INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
- tn->nametbl = tipc_nametbl;
+ INIT_LIST_HEAD(&nt->node_scope);
+ INIT_LIST_HEAD(&nt->cluster_scope);
+ rwlock_init(&nt->cluster_scope_lock);
+ tn->nametbl = nt;
spin_lock_init(&tn->nametbl_lock);
return 0;
}
/**
- * tipc_purge_publications - remove all publications for a given type
- *
- * tipc_nametbl_lock must be held when calling this function
+ * tipc_service_delete - purge all publications for a service and delete it
+ * @net: the associated network namespace
+ * @sc: tipc_service to delete
*/
-static void tipc_purge_publications(struct net *net, struct name_seq *seq)
+static void tipc_service_delete(struct net *net, struct tipc_service *sc)
{
- struct publication *publ, *safe;
- struct sub_seq *sseq;
- struct name_info *info;
-
- spin_lock_bh(&seq->lock);
- sseq = seq->sseqs;
- info = sseq->info;
- list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
- tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
- publ->ref, publ->key);
- kfree_rcu(publ, rcu);
+ struct service_range *sr, *tmpr;
+ struct publication *p, *tmp;
+
+ spin_lock_bh(&sc->lock);
+ rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) {
+ list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) {
+ tipc_service_remove_publ(sr, &p->sk, p->key);
+ kfree_rcu(p, rcu);
+ }
+ rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks);
+ kfree(sr);
}
- hlist_del_init_rcu(&seq->ns_list);
- kfree(seq->sseqs);
- spin_unlock_bh(&seq->lock);
-
- kfree_rcu(seq, rcu);
+ hlist_del_init_rcu(&sc->service_list);
+ spin_unlock_bh(&sc->lock);
+ kfree_rcu(sc, rcu);
}
void tipc_nametbl_stop(struct net *net)
{
+ struct name_table *nt = tipc_name_table(net);
+ struct tipc_net *tn = tipc_net(net);
+ struct hlist_head *service_head;
+ struct tipc_service *service;
u32 i;
- struct name_seq *seq;
- struct hlist_head *seq_head;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct name_table *tipc_nametbl = tn->nametbl;
/* Verify name table is empty and purge any lingering
* publications, then release the name table
*/
spin_lock_bh(&tn->nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
- if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
+ if (hlist_empty(&nt->services[i]))
continue;
- seq_head = &tipc_nametbl->seq_hlist[i];
- hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
- tipc_purge_publications(net, seq);
+ service_head = &nt->services[i];
+ hlist_for_each_entry_rcu(service, service_head, service_list) {
+ tipc_service_delete(net, service);
}
}
spin_unlock_bh(&tn->nametbl_lock);
- synchronize_net();
- kfree(tipc_nametbl);
-
+ /* TODO: clear tn->nametbl, implement proper RCU rules ? */
+ kfree_rcu(nt, rcu);
}
static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
- struct name_seq *seq,
- struct sub_seq *sseq, u32 *last_publ)
+ struct tipc_service *service,
+ struct service_range *sr,
+ u32 *last_key)
{
- void *hdr;
- struct nlattr *attrs;
- struct nlattr *publ;
struct publication *p;
+ struct nlattr *attrs;
+ struct nlattr *b;
+ void *hdr;
- if (*last_publ) {
- list_for_each_entry(p, &sseq->info->zone_list, zone_list)
- if (p->key == *last_publ)
+ if (*last_key) {
+ list_for_each_entry(p, &sr->all_publ, all_publ)
+ if (p->key == *last_key)
break;
- if (p->key != *last_publ)
+ if (list_entry_is_head(p, &sr->all_publ, all_publ))
return -EPIPE;
} else {
- p = list_first_entry(&sseq->info->zone_list, struct publication,
- zone_list);
+ p = list_first_entry(&sr->all_publ,
+ struct publication,
+ all_publ);
}
- list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
- *last_publ = p->key;
+ list_for_each_entry_from(p, &sr->all_publ, all_publ) {
+ *last_key = p->key;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
&tipc_genl_family, NLM_F_MULTI,
@@ -904,39 +984,39 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE);
if (!attrs)
goto msg_full;
- publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
- if (!publ)
+ b = nla_nest_start_noflag(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
+ if (!b)
goto attr_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, service->type))
goto publ_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sr->lower))
goto publ_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sr->upper))
goto publ_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
goto publ_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->sk.node))
goto publ_msg_full;
- if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
+ if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->sk.ref))
goto publ_msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
goto publ_msg_full;
- nla_nest_end(msg->skb, publ);
+ nla_nest_end(msg->skb, b);
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
}
- *last_publ = 0;
+ *last_key = 0;
return 0;
publ_msg_full:
- nla_nest_cancel(msg->skb, publ);
+ nla_nest_cancel(msg->skb, b);
attr_msg_full:
nla_nest_cancel(msg->skb, attrs);
msg_full:
@@ -945,39 +1025,35 @@ msg_full:
return -EMSGSIZE;
}
-static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
- u32 *last_lower, u32 *last_publ)
+static int __tipc_nl_service_range_list(struct tipc_nl_msg *msg,
+ struct tipc_service *sc,
+ u32 *last_lower, u32 *last_key)
{
- struct sub_seq *sseq;
- struct sub_seq *sseq_start;
+ struct service_range *sr;
+ struct rb_node *n;
int err;
- if (*last_lower) {
- sseq_start = nameseq_find_subseq(seq, *last_lower);
- if (!sseq_start)
- return -EPIPE;
- } else {
- sseq_start = seq->sseqs;
- }
-
- for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
- err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
+ for (n = rb_first(&sc->ranges); n; n = rb_next(n)) {
+ sr = container_of(n, struct service_range, tree_node);
+ if (sr->lower < *last_lower)
+ continue;
+ err = __tipc_nl_add_nametable_publ(msg, sc, sr, last_key);
if (err) {
- *last_lower = sseq->lower;
+ *last_lower = sr->lower;
return err;
}
}
*last_lower = 0;
-
return 0;
}
-static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
- u32 *last_type, u32 *last_lower, u32 *last_publ)
+static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
+ u32 *last_type, u32 *last_lower, u32 *last_key)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct hlist_head *seq_head;
- struct name_seq *seq = NULL;
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_service *service = NULL;
+ struct hlist_head *head;
+ struct tipc_uaddr ua;
int err;
int i;
@@ -987,30 +1063,34 @@ static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
i = 0;
for (; i < TIPC_NAMETBL_SIZE; i++) {
- seq_head = &tn->nametbl->seq_hlist[i];
-
- if (*last_type) {
- seq = nametbl_find_seq(net, *last_type);
- if (!seq)
+ head = &tn->nametbl->services[i];
+
+ if (*last_type ||
+ (!i && *last_key && (*last_lower == *last_key))) {
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ *last_type, *last_lower, *last_lower);
+ service = tipc_service_find(net, &ua);
+ if (!service)
return -EPIPE;
} else {
- hlist_for_each_entry_rcu(seq, seq_head, ns_list)
+ hlist_for_each_entry_rcu(service, head, service_list)
break;
- if (!seq)
+ if (!service)
continue;
}
- hlist_for_each_entry_from_rcu(seq, ns_list) {
- spin_lock_bh(&seq->lock);
- err = __tipc_nl_subseq_list(msg, seq, last_lower,
- last_publ);
+ hlist_for_each_entry_from_rcu(service, service_list) {
+ spin_lock_bh(&service->lock);
+ err = __tipc_nl_service_range_list(msg, service,
+ last_lower,
+ last_key);
if (err) {
- *last_type = seq->type;
- spin_unlock_bh(&seq->lock);
+ *last_type = service->type;
+ spin_unlock_bh(&service->lock);
return err;
}
- spin_unlock_bh(&seq->lock);
+ spin_unlock_bh(&service->lock);
}
*last_type = 0;
}
@@ -1019,13 +1099,13 @@ static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- int err;
- int done = cb->args[3];
+ struct net *net = sock_net(skb->sk);
u32 last_type = cb->args[0];
u32 last_lower = cb->args[1];
- u32 last_publ = cb->args[2];
- struct net *net = sock_net(skb->sk);
+ u32 last_key = cb->args[2];
+ int done = cb->args[3];
struct tipc_nl_msg msg;
+ int err;
if (done)
return 0;
@@ -1035,7 +1115,8 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rcu_read_lock();
- err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
+ err = tipc_nl_service_list(net, &msg, &last_type,
+ &last_lower, &last_key);
if (!err) {
done = 1;
} else if (err != -EMSGSIZE) {
@@ -1051,85 +1132,73 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0] = last_type;
cb->args[1] = last_lower;
- cb->args[2] = last_publ;
+ cb->args[2] = last_key;
cb->args[3] = done;
return skb->len;
}
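
The dump rewrite above keeps its position in cb->args[] as three cursors (last_type, last_lower, last_key), so a dump that overruns one skb can resume at exactly the publication it stopped on. A minimal userspace sketch of that resumable-iteration idea, assuming a fixed item budget in place of the socket buffer (all names here are hypothetical):

#include <stdio.h>

#define NITEMS 10
#define BUDGET 4	/* items per "skb" */

/* Emit up to BUDGET items, remembering where to resume in *cursor
 * (the analogue of cb->args[]); returns 0 once the table is drained.
 */
static int dump_once(unsigned int *cursor)
{
	unsigned int emitted = 0;

	for (unsigned int key = *cursor; key < NITEMS; key++) {
		if (emitted == BUDGET) {
			*cursor = key;	/* resume here on the next call */
			return -1;	/* kernel: -EMSGSIZE */
		}
		printf("publ key=%u\n", key);
		emitted++;
	}
	*cursor = 0;
	return 0;
}

int main(void)
{
	unsigned int cursor = 0;
	int round = 0;

	while (dump_once(&cursor) != 0)
		printf("-- buffer %d full, resuming at key %u --\n",
		       ++round, cursor);
	return 0;
}
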
-bool u32_find(struct list_head *l, u32 value)
+struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
{
- struct u32_item *item;
+ struct tipc_dest *dst;
- list_for_each_entry(item, l, list) {
- if (item->value == value)
- return true;
+ list_for_each_entry(dst, l, list) {
+ if (dst->node == node && dst->port == port)
+ return dst;
}
- return false;
+ return NULL;
}
-bool u32_push(struct list_head *l, u32 value)
+bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
{
- struct u32_item *item;
+ struct tipc_dest *dst;
- list_for_each_entry(item, l, list) {
- if (item->value == value)
- return false;
- }
- item = kmalloc(sizeof(*item), GFP_ATOMIC);
- if (unlikely(!item))
+ if (tipc_dest_find(l, node, port))
return false;
- item->value = value;
- list_add(&item->list, l);
+ dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
+ if (unlikely(!dst))
+ return false;
+ dst->node = node;
+ dst->port = port;
+ list_add(&dst->list, l);
return true;
}
-u32 u32_pop(struct list_head *l)
+bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port)
{
- struct u32_item *item;
- u32 value = 0;
+ struct tipc_dest *dst;
if (list_empty(l))
- return 0;
- item = list_first_entry(l, typeof(*item), list);
- value = item->value;
- list_del(&item->list);
- kfree(item);
- return value;
-}
-
-bool u32_del(struct list_head *l, u32 value)
-{
- struct u32_item *item, *tmp;
-
- list_for_each_entry_safe(item, tmp, l, list) {
- if (item->value != value)
- continue;
- list_del(&item->list);
- kfree(item);
- return true;
- }
- return false;
+ return false;
+ dst = list_first_entry(l, typeof(*dst), list);
+ if (port)
+ *port = dst->port;
+ if (node)
+ *node = dst->node;
+ list_del(&dst->list);
+ kfree(dst);
+ return true;
}
-void u32_list_purge(struct list_head *l)
+bool tipc_dest_del(struct list_head *l, u32 node, u32 port)
{
- struct u32_item *item, *tmp;
+ struct tipc_dest *dst;
- list_for_each_entry_safe(item, tmp, l, list) {
- list_del(&item->list);
- kfree(item);
- }
+ dst = tipc_dest_find(l, node, port);
+ if (!dst)
+ return false;
+ list_del(&dst->list);
+ kfree(dst);
+ return true;
}
-int u32_list_len(struct list_head *l)
+void tipc_dest_list_purge(struct list_head *l)
{
- struct u32_item *item;
- int i = 0;
+ struct tipc_dest *dst, *tmp;
- list_for_each_entry(item, l, list) {
- i++;
+ list_for_each_entry_safe(dst, tmp, l, list) {
+ list_del(&dst->list);
+ kfree(dst);
}
- return i;
}
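
The tipc_dest helpers above replace the old value-only u32 list with (node, port) pairs, and tipc_dest_pop() now reports success through its return value, so a legitimate all-zero destination is no longer indistinguishable from an empty list (u32_pop() returned 0 for both). A minimal userspace sketch of the same pattern, assuming only the C standard library in place of <linux/list.h> and kmalloc():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct tipc_dest; singly linked for brevity. */
struct dest {
	struct dest *next;
	uint32_t node;
	uint32_t port;
};

static struct dest *dest_find(struct dest *l, uint32_t node, uint32_t port)
{
	for (; l; l = l->next)
		if (l->node == node && l->port == port)
			return l;
	return NULL;
}

static bool dest_push(struct dest **l, uint32_t node, uint32_t port)
{
	struct dest *d;

	if (dest_find(*l, node, port))
		return false;		/* no duplicates, as in tipc_dest_push() */
	d = malloc(sizeof(*d));
	if (!d)
		return false;
	d->node = node;
	d->port = port;
	d->next = *l;
	*l = d;
	return true;
}

static bool dest_pop(struct dest **l, uint32_t *node, uint32_t *port)
{
	struct dest *d = *l;

	if (!d)
		return false;		/* empty list is now unambiguous */
	if (node)
		*node = d->node;
	if (port)
		*port = d->port;
	*l = d->next;
	free(d);
	return true;
}

int main(void)
{
	struct dest *l = NULL;
	uint32_t n, p;

	dest_push(&l, 1001, 42);
	dest_push(&l, 1001, 42);	/* rejected as a duplicate */
	dest_push(&l, 1002, 43);
	while (dest_pop(&l, &n, &p))
		printf("dest <%u:%u>\n", n, p);
	return 0;
}
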
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 6ebdeb1d84a5..7ff6eeebaae6 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,8 +1,9 @@
/*
* net/tipc/name_table.h: Include file for TIPC name table code
*
- * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2000-2006, 2014-2018, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -40,6 +41,8 @@
struct tipc_subscription;
struct tipc_plist;
struct tipc_nlist;
+struct tipc_group;
+struct tipc_uaddr;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
@@ -48,88 +51,107 @@ struct tipc_nlist;
#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1)
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
+#define TIPC_ANY_SCOPE 10 /* Both node and cluster scope will match */
+
/**
- * struct publication - info about a published (name or) name sequence
- * @type: name sequence type
- * @lower: name sequence lower bound
- * @upper: name sequence upper bound
- * @scope: scope of publication
- * @node: network address of publishing port's node
- * @ref: publishing port
- * @key: publication key
- * @nodesub_list: subscription to "node down" event (off-node publication only)
- * @local_list: adjacent entries in list of publications made by this node
- * @pport_list: adjacent entries in list of publications made by this port
- * @node_list: adjacent matching name seq publications with >= node scope
- * @cluster_list: adjacent matching name seq publications with >= cluster scope
- * @zone_list: adjacent matching name seq publications with >= zone scope
+ * struct publication - info about a published service address or range
+ * @sr: service range represented by this publication
+ * @sk: address of socket bound to this publication
+ * @scope: scope of publication, TIPC_NODE_SCOPE or TIPC_CLUSTER_SCOPE
+ * @key: publication key, unique across the cluster
+ * @id: publication id
+ * @binding_node: all publications from the same node which bound this one
+ * - Remote publications: in node->publ_list;
+ * Used by node/name distr to withdraw publications when node is lost
+ * - Local/node scope publications: in name_table->node_scope list
+ * - Local/cluster scope publications: in name_table->cluster_scope list
+ * @binding_sock: all publications from the same socket which bound this one
+ * Used by socket to withdraw publications when socket is unbound/released
+ * @local_publ: list of identical publications made from this node
+ * Used by closest_first and multicast receive lookup algorithms
+ * @all_publ: all publications identical to this one, whatever node and scope
+ * Used by round-robin lookup algorithm
+ * @list: to form a list of publications in temporal order
* @rcu: RCU callback head used for deferred freeing
- *
- * Note that the node list, cluster list, and zone list are circular lists.
*/
struct publication {
- u32 type;
- u32 lower;
- u32 upper;
- u32 scope;
- u32 node;
- u32 ref;
+ struct tipc_service_range sr;
+ struct tipc_socket_addr sk;
+ u16 scope;
u32 key;
- struct list_head nodesub_list;
- struct list_head local_list;
- struct list_head pport_list;
- struct list_head node_list;
- struct list_head cluster_list;
- struct list_head zone_list;
+ u32 id;
+ struct list_head binding_node;
+ struct list_head binding_sock;
+ struct list_head local_publ;
+ struct list_head all_publ;
+ struct list_head list;
struct rcu_head rcu;
};
/**
* struct name_table - table containing all existing port name publications
- * @seq_hlist: name sequence hash lists
- * @publ_list: pulication lists
+ * @rcu: RCU callback head used for deferred freeing
+ * @services: name sequence hash lists
+ * @node_scope: all local publications with node scope
+ * - used by name_distr during re-init of name table
+ * @cluster_scope: all local publications with cluster scope
+ * - used by name_distr to send bulk updates to new nodes
+ * - used by name_distr during re-init of name table
+ * @cluster_scope_lock: lock for accessing @cluster_scope
* @local_publ_count: number of publications issued by this node
+ * @rc_dests: destination node counter
+ * @snd_nxt: next sequence number to be used
*/
struct name_table {
- struct hlist_head seq_hlist[TIPC_NAMETBL_SIZE];
- struct list_head publ_list[TIPC_PUBL_SCOPE_NUM];
+ struct rcu_head rcu;
+ struct hlist_head services[TIPC_NAMETBL_SIZE];
+ struct list_head node_scope;
+ struct list_head cluster_scope;
+ rwlock_t cluster_scope_lock;
u32 local_publ_count;
+ u32 rc_dests;
+ u32 snd_nxt;
};
int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
-
-u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
-int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
- u32 limit, struct list_head *dports);
-void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
- u32 upper, u32 domain,
- struct tipc_nlist *nodes);
-struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
- u32 upper, u32 scope, u32 port_ref,
- u32 key);
-int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
- u32 key);
-struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
- u32 lower, u32 upper, u32 scope,
- u32 node, u32 ref, u32 key);
-struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
- u32 lower, u32 node, u32 ref,
+bool tipc_nametbl_lookup_anycast(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk);
+void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dports);
+void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_nlist *nodes);
+bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua,
+ struct list_head *dsts, int *dstcnt,
+ u32 exclude, bool mcast);
+void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp,
+ struct tipc_uaddr *ua);
+struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key);
+void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk, u32 key);
+struct publication *tipc_nametbl_insert_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
u32 key);
-void tipc_nametbl_subscribe(struct tipc_subscription *s);
+struct publication *tipc_nametbl_remove_publ(struct net *net,
+ struct tipc_uaddr *ua,
+ struct tipc_socket_addr *sk,
+ u32 key);
+bool tipc_nametbl_subscribe(struct tipc_subscription *s);
void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(struct net *net);
void tipc_nametbl_stop(struct net *net);
-struct u32_item {
+struct tipc_dest {
struct list_head list;
- u32 value;
+ u32 port;
+ u32 node;
};
-bool u32_push(struct list_head *l, u32 value);
-u32 u32_pop(struct list_head *l);
-bool u32_find(struct list_head *l, u32 value);
-bool u32_del(struct list_head *l, u32 value);
-void u32_list_purge(struct list_head *l);
-int u32_list_len(struct list_head *l);
+struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
+bool tipc_dest_push(struct list_head *l, u32 node, u32 port);
+bool tipc_dest_pop(struct list_head *l, u32 *node, u32 *port);
+bool tipc_dest_del(struct list_head *l, u32 node, u32 port);
+void tipc_dest_list_purge(struct list_head *l);
#endif
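
The @all_publ comment in the new struct publication says the list feeds the round-robin lookup algorithm: consecutive lookups should hand back different sockets among identical publications. A toy sketch of that selection policy, assuming a fixed array with a rotating head (the in-kernel version instead rotates the chosen entry to the tail of all_publ):

#include <stdio.h>

struct publ {
	unsigned int node;
	unsigned int ref;
};

static struct publ ring[] = {
	{ 1001, 100 }, { 1002, 200 }, { 1003, 300 },
};
static unsigned int head;

/* Pick the current head, then advance it, so repeated lookups
 * spread load across all matching sockets.
 */
static struct publ *lookup_round_robin(void)
{
	struct publ *p = &ring[head];

	head = (head + 1) % (sizeof(ring) / sizeof(ring[0]));
	return p;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		struct publ *p = lookup_round_robin();
		printf("-> <%u:%u>\n", p->node, p->ref);
	}
	return 0;
}
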
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 719c5924b638..7e65d0b0c4a8 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -41,7 +41,9 @@
#include "socket.h"
#include "node.h"
#include "bcast.h"
+#include "link.h"
#include "netlink.h"
+#include "monitor.h"
/*
* The TIPC locking policy is designed to ensure a very fine locking
@@ -88,7 +90,7 @@
* - A spin lock to protect the registry of kernel/driver users (reg.c)
* - A global spin_lock (tipc_port_lock), which only task is to ensure
* consistency where more than one port is involved in an operation,
- * i.e., whe a port is part of a linked list of ports.
+ * i.e., when a port is part of a linked list of ports.
* There are two such lists; 'port_list', which is used for management,
* and 'wait_list', which is used to queue ports during congestion.
*
@@ -104,38 +106,55 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
-int tipc_net_start(struct net *net, u32 addr)
+static void tipc_net_finalize(struct net *net, u32 addr);
+
+int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- char addr_string[16];
+ if (tipc_own_id(net)) {
+ pr_info("Cannot configure node identity twice\n");
+ return -1;
+ }
+ pr_info("Started in network mode\n");
+
+ if (node_id)
+ tipc_set_node_id(net, node_id);
+ if (addr)
+ tipc_net_finalize(net, addr);
+ return 0;
+}
- tn->own_addr = addr;
+static void tipc_net_finalize(struct net *net, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_socket_addr sk = {0, addr};
+ struct tipc_uaddr ua;
- /* Ensure that the new address is visible before we reinit. */
- smp_mb();
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_CLUSTER_SCOPE,
+ TIPC_NODE_STATE, addr, addr);
+ if (cmpxchg(&tn->node_addr, 0, addr))
+ return;
+ tipc_set_node_addr(net, addr);
tipc_named_reinit(net);
tipc_sk_reinit(net);
+ tipc_mon_reinit_self(net);
+ tipc_nametbl_publish(net, &ua, &sk, addr);
+}
- tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
- TIPC_ZONE_SCOPE, 0, tn->own_addr);
+void tipc_net_finalize_work(struct work_struct *work)
+{
+ struct tipc_net *tn = container_of(work, struct tipc_net, work);
- pr_info("Started in network mode\n");
- pr_info("Own node address %s, network identity %u\n",
- tipc_addr_string_fill(addr_string, tn->own_addr),
- tn->net_id);
- return 0;
+ rtnl_lock();
+ tipc_net_finalize(tipc_link_net(tn->bcl), tn->trial_addr);
+ rtnl_unlock();
}
void tipc_net_stop(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
- if (!tn->own_addr)
+ if (!tipc_own_id(net))
return;
- tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
- tn->own_addr);
rtnl_lock();
tipc_bearer_stop(net);
tipc_node_stop(net);
@@ -147,21 +166,26 @@ void tipc_net_stop(struct net *net)
static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- void *hdr;
+ u64 *w0 = (u64 *)&tn->node_id[0];
+ u64 *w1 = (u64 *)&tn->node_id[8];
struct nlattr *attrs;
+ void *hdr;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_NET_GET);
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NET);
if (!attrs)
goto msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
goto attr_msg_full;
-
+ if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID, *w0, 0))
+ goto attr_msg_full;
+ if (nla_put_u64_64bit(msg->skb, TIPC_NLA_NET_NODEID_W1, *w1, 0))
+ goto attr_msg_full;
nla_nest_end(msg->skb, attrs);
genlmsg_end(msg->skb, hdr);
@@ -200,29 +224,30 @@ out:
return skb->len;
}
-int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
{
- struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
int err;
if (!info->attrs[TIPC_NLA_NET])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
- info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
- info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy, info->extack);
+
if (err)
return err;
+ /* Can't change net id once TIPC has joined a network */
+ if (tipc_own_addr(net))
+ return -EPERM;
+
if (attrs[TIPC_NLA_NET_ID]) {
u32 val;
- /* Can't change net id once TIPC has joined a network */
- if (tn->own_addr)
- return -EPERM;
-
val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
if (val < 1 || val > 9999)
return -EINVAL;
@@ -233,18 +258,90 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
if (attrs[TIPC_NLA_NET_ADDR]) {
u32 addr;
- /* Can't change net addr once TIPC has joined a network */
- if (tn->own_addr)
- return -EPERM;
-
addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
- if (!tipc_addr_node_valid(addr))
+ if (!addr)
return -EINVAL;
+ tn->legacy_addr_format = true;
+ tipc_net_init(net, NULL, addr);
+ }
+
+ if (attrs[TIPC_NLA_NET_NODEID]) {
+ u8 node_id[NODE_ID_LEN];
+ u64 *w0 = (u64 *)&node_id[0];
+ u64 *w1 = (u64 *)&node_id[8];
- rtnl_lock();
- tipc_net_start(net, addr);
- rtnl_unlock();
+ if (!attrs[TIPC_NLA_NET_NODEID_W1])
+ return -EINVAL;
+ *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
+ *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
+ tipc_net_init(net, node_id, 0);
}
+ return 0;
+}
+
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_net_set(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __tipc_nl_addr_legacy_get(struct net *net, struct tipc_nl_msg *msg)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct nlattr *attrs;
+ void *hdr;
+
+ hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+ 0, TIPC_NL_ADDR_LEGACY_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
+ if (!attrs)
+ goto msg_full;
+
+ if (tn->legacy_addr_format)
+ if (nla_put_flag(msg->skb, TIPC_NLA_NET_ADDR_LEGACY))
+ goto attr_msg_full;
+
+ nla_nest_end(msg->skb, attrs);
+ genlmsg_end(msg->skb, hdr);
return 0;
+
+attr_msg_full:
+ nla_nest_cancel(msg->skb, attrs);
+msg_full:
+ genlmsg_cancel(msg->skb, hdr);
+
+ return -EMSGSIZE;
+}
+
+int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_nl_msg msg;
+ struct sk_buff *rep;
+ int err;
+
+ rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
+ msg.skb = rep;
+ msg.portid = info->snd_portid;
+ msg.seq = info->snd_seq;
+
+ err = __tipc_nl_addr_legacy_get(net, &msg);
+ if (err) {
+ nlmsg_free(msg.skb);
+ return err;
+ }
+
+ return genlmsg_reply(msg.skb, info);
}
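
tipc_net_finalize() can be reached both from the trial-address work item and from a netlink request, so the cmpxchg() on tn->node_addr is what guarantees the re-init sequence (name table, sockets, monitor, self publication) runs exactly once: only the caller that wins the 0 -> addr swap proceeds. A hedged userspace analogue with C11 atomics and <threads.h> (illustrative only; names are hypothetical):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

static _Atomic unsigned int node_addr;

static void net_finalize(unsigned int addr)
{
	unsigned int expected = 0;

	/* Analogue of cmpxchg(&tn->node_addr, 0, addr): only the winner
	 * of the 0 -> addr swap performs the one-time re-init work.
	 */
	if (!atomic_compare_exchange_strong(&node_addr, &expected, addr))
		return;
	printf("finalized once, with address %#x\n", addr);
}

static int worker(void *arg)
{
	net_finalize((unsigned int)(uintptr_t)arg);
	return 0;
}

int main(void)
{
	thrd_t a, b;

	thrd_create(&a, worker, (void *)(uintptr_t)0x1001001);
	thrd_create(&b, worker, (void *)(uintptr_t)0x2002002);
	thrd_join(a, NULL);
	thrd_join(b, NULL);
	return 0;
}
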
diff --git a/net/tipc/net.h b/net/tipc/net.h
index c7c254902873..1cb1e43cf34a 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -41,11 +41,12 @@
extern const struct nla_policy tipc_nl_net_policy[];
-int tipc_net_start(struct net *net, u32 addr);
-
+int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
+void tipc_net_finalize_work(struct work_struct *work);
void tipc_net_stop(struct net *net);
-
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_net_addr_legacy_get(struct sk_buff *skb, struct genl_info *info);
#endif
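
The header now exports the locking split used throughout this series: tipc_nl_net_set() takes rtnl_lock itself, while the double-underscore __tipc_nl_net_set() assumes the caller already holds it, which lets the compat layer run transcode and doit under a single lock acquisition. A minimal pthreads sketch of that convention (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* __cfg_set(): caller must hold cfg_lock (kernel: rtnl_lock). */
static int __cfg_set(int val)
{
	printf("setting value %d under lock\n", val);
	return 0;
}

/* cfg_set(): public entry point, takes the lock itself. */
static int cfg_set(int val)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = __cfg_set(val);
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

/* A compat-style path that must issue several operations atomically
 * holds the lock once and calls the __ variants directly.
 */
static int cfg_set_two(int a, int b)
{
	int err;

	pthread_mutex_lock(&cfg_lock);
	err = __cfg_set(a);
	if (!err)
		err = __cfg_set(b);
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	cfg_set(1);
	cfg_set_two(2, 3);
	return 0;
}
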
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index b76f13f6fea1..1a9a5bdaccf4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -79,12 +79,16 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_NODEID] = { .type = NLA_U64 },
+ [TIPC_NLA_NET_NODEID_W1] = { .type = NLA_U64 },
+ [TIPC_NLA_NET_ADDR_LEGACY] = { .type = NLA_FLAG }
};
const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_LINK_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_LINK_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_LINK_NAME },
[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
@@ -99,7 +103,13 @@ const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
[TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
- [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
+ [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_ID] = { .type = NLA_BINARY,
+ .len = TIPC_NODEID_LEN},
+ [TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
+ .len = TIPC_AEAD_KEY_SIZE_MAX},
+ [TIPC_NLA_NODE_KEY_MASTER] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_REKEYING] = { .type = NLA_U32 },
};
/* Properties valid for media, bearer and link */
@@ -107,12 +117,15 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
- [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
+ [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_MTU] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_BROADCAST] = { .type = NLA_U32 },
+ [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
};
const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
[TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_BEARER_NAME] = { .type = NLA_STRING,
+ [TIPC_NLA_BEARER_NAME] = { .type = NLA_NUL_STRING,
.len = TIPC_MAX_BEARER_NAME },
[TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
[TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
@@ -138,115 +151,137 @@ const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
static const struct genl_ops tipc_genl_v2_ops[] = {
{
.cmd = TIPC_NL_BEARER_DISABLE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_bearer_disable,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_BEARER_ENABLE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_bearer_enable,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_BEARER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_bearer_get,
.dumpit = tipc_nl_bearer_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_BEARER_ADD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_bearer_add,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_BEARER_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_bearer_set,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_SOCK_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .start = tipc_dump_start,
.dumpit = tipc_nl_sk_dump,
- .policy = tipc_nl_policy,
+ .done = tipc_dump_done,
},
{
.cmd = TIPC_NL_PUBL_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_nl_publ_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_LINK_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
.doit = tipc_nl_node_get_link,
.dumpit = tipc_nl_node_dump_link,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_LINK_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_node_set_link,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_LINK_RESET_STATS,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_node_reset_link_stats,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_MEDIA_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_media_get,
.dumpit = tipc_nl_media_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_MEDIA_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_media_set,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_NODE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = tipc_nl_node_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_NET_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = tipc_nl_net_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_NET_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_net_set,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_NAME_TABLE_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = tipc_nl_name_table_dump,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_MON_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_node_set_monitor,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_MON_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_node_get_monitor,
.dumpit = tipc_nl_node_dump_monitor,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_MON_PEER_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_nl_node_dump_monitor_peer,
- .policy = tipc_nl_policy,
},
{
.cmd = TIPC_NL_PEER_REMOVE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_peer_rm,
- .policy = tipc_nl_policy,
},
#ifdef CONFIG_TIPC_MEDIA_UDP
{
.cmd = TIPC_NL_UDP_GET_REMOTEIP,
+ .validate = GENL_DONT_VALIDATE_STRICT |
+ GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = tipc_udp_nl_dump_remoteip,
- .policy = tipc_nl_policy,
},
#endif
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .cmd = TIPC_NL_KEY_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_set_key,
+ },
+ {
+ .cmd = TIPC_NL_KEY_FLUSH,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_node_flush_key,
+ },
+#endif
+ {
+ .cmd = TIPC_NL_ADDR_LEGACY_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = tipc_nl_net_addr_legacy_get,
+ },
};
struct genl_family tipc_genl_family __ro_after_init = {
@@ -254,24 +289,14 @@ struct genl_family tipc_genl_family __ro_after_init = {
.version = TIPC_GENL_V2_VERSION,
.hdrsize = 0,
.maxattr = TIPC_NLA_MAX,
+ .policy = tipc_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = tipc_genl_v2_ops,
.n_ops = ARRAY_SIZE(tipc_genl_v2_ops),
+ .resv_start_op = TIPC_NL_ADDR_LEGACY_GET + 1,
};
-int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
-{
- u32 maxattr = tipc_genl_family.maxattr;
-
- *attr = genl_family_attrbuf(&tipc_genl_family);
- if (!*attr)
- return -EOPNOTSUPP;
-
- return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy,
- NULL);
-}
-
int __init tipc_netlink_start(void)
{
int res;
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
index 4ba0ad422110..7cf777723e3e 100644
--- a/net/tipc/netlink.h
+++ b/net/tipc/netlink.h
@@ -38,7 +38,6 @@
#include <net/netlink.h>
extern struct genl_family tipc_genl_family;
-int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
struct tipc_nl_msg {
struct sk_buff *skb;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 9bfe886ab330..079aebb16ed8 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -39,6 +39,7 @@
#include "node.h"
#include "net.h"
#include <net/genetlink.h>
+#include <linux/string_helpers.h>
#include <linux/tipc_config.h>
/* The legacy API had an artificial message length limit called
@@ -55,6 +56,7 @@ struct tipc_nl_compat_msg {
int rep_type;
int rep_size;
int req_type;
+ int req_size;
struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
@@ -87,6 +89,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -95,6 +102,7 @@ static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
return -EMSGSIZE;
skb_put(skb, TLV_SPACE(len));
+ memset(tlv, 0, TLV_SPACE(len));
tlv->tlv_type = htons(type);
tlv->tlv_len = htons(TLV_LENGTH(len));
if (len && data)
@@ -112,7 +120,8 @@ static void tipc_tlv_init(struct sk_buff *skb, u16 type)
skb_put(skb, sizeof(struct tlv_desc));
}
-static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
+static __printf(2, 3) int tipc_tlv_sprintf(struct sk_buff *skb,
+ const char *fmt, ...)
{
int n;
u16 len;
@@ -159,7 +168,7 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
int str_len = strlen(str) + 1;
struct sk_buff *buf;
- buf = tipc_tlv_alloc(TLV_SPACE(str_len));
+ buf = tipc_tlv_alloc(str_len);
if (buf)
tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len);
@@ -170,35 +179,60 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
{
+ struct genl_dumpit_info info;
int len = 0;
int err;
struct sk_buff *buf;
struct nlmsghdr *nlmsg;
struct netlink_callback cb;
+ struct nlattr **attrbuf;
memset(&cb, 0, sizeof(cb));
cb.nlh = (struct nlmsghdr *)arg->data;
cb.skb = arg;
+ cb.data = &info;
buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf->sk = msg->dst_sk;
+ if (__tipc_dump_start(&cb, msg->net)) {
+ kfree_skb(buf);
+ return -ENOMEM;
+ }
+
+ attrbuf = kcalloc(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ info.info.attrs = attrbuf;
+
+ if (nlmsg_len(cb.nlh) > 0) {
+ err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy, NULL);
+ if (err)
+ goto err_out;
+ }
do {
int rem;
len = (*cmd->dumpit)(buf, &cb);
nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
- struct nlattr **attrs;
-
- err = tipc_nlmsg_parse(nlmsg, &attrs);
+ err = nlmsg_parse_deprecated(nlmsg, GENL_HDRLEN,
+ attrbuf,
+ tipc_genl_family.maxattr,
+ tipc_genl_family.policy,
+ NULL);
if (err)
goto err_out;
- err = (*cmd->format)(msg, attrs);
+ err = (*cmd->format)(msg, attrbuf);
if (err)
goto err_out;
@@ -216,6 +250,8 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
err = 0;
err_out:
+ kfree(attrbuf);
+ tipc_dump_done(&cb);
kfree_skb(buf);
if (err == -EMSGSIZE) {
@@ -239,10 +275,12 @@ err_out:
static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg)
{
- int err;
+ struct nlmsghdr *nlh;
struct sk_buff *arg;
+ int err;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -252,19 +290,36 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
- if (cmd->header)
- (*cmd->header)(msg);
+ if (cmd->header) {
+ err = (*cmd->header)(msg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return err;
+ }
+ }
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
kfree_skb(msg->rep);
+ msg->rep = NULL;
return -ENOMEM;
}
- err = __tipc_nl_compat_dumpit(cmd, msg, arg);
- if (err)
+ nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
+ if (!nlh) {
+ kfree_skb(arg);
kfree_skb(msg->rep);
+ msg->rep = NULL;
+ return -EMSGSIZE;
+ }
+ nlmsg_end(arg, nlh);
+ err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+ if (err) {
+ kfree_skb(msg->rep);
+ msg->rep = NULL;
+ }
kfree_skb(arg);
return err;
@@ -283,38 +338,42 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
if (!trans_buf)
return -ENOMEM;
- err = (*cmd->transcode)(cmd, trans_buf, msg);
- if (err)
- goto trans_out;
-
- attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
- sizeof(struct nlattr *), GFP_KERNEL);
+ attrbuf = kmalloc_array(tipc_genl_family.maxattr + 1,
+ sizeof(struct nlattr *),
+ GFP_KERNEL);
if (!attrbuf) {
err = -ENOMEM;
goto trans_out;
}
- err = nla_parse(attrbuf, tipc_genl_family.maxattr,
- (const struct nlattr *)trans_buf->data,
- trans_buf->len, NULL, NULL);
- if (err)
- goto parse_out;
-
doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!doit_buf) {
err = -ENOMEM;
- goto parse_out;
+ goto attrbuf_out;
}
- doit_buf->sk = msg->dst_sk;
-
memset(&info, 0, sizeof(info));
info.attrs = attrbuf;
+ rtnl_lock();
+ err = (*cmd->transcode)(cmd, trans_buf, msg);
+ if (err)
+ goto doit_out;
+
+ err = nla_parse_deprecated(attrbuf, tipc_genl_family.maxattr,
+ (const struct nlattr *)trans_buf->data,
+ trans_buf->len, NULL, NULL);
+ if (err)
+ goto doit_out;
+
+ doit_buf->sk = msg->dst_sk;
+
err = (*cmd->doit)(doit_buf, &info);
+doit_out:
+ rtnl_unlock();
kfree_skb(doit_buf);
-parse_out:
+attrbuf_out:
kfree(attrbuf);
trans_out:
kfree_skb(trans_buf);
@@ -327,7 +386,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
{
int err;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
err = __tipc_nl_compat_doit(cmd, msg);
@@ -351,8 +411,8 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX,
- attrs[TIPC_NLA_BEARER], NULL, NULL);
+ err = nla_parse_nested_deprecated(bearer, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER], NULL, NULL);
if (err)
return err;
@@ -368,13 +428,23 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
- bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_bearer_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_terminated(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
@@ -382,7 +452,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
return -EMSGSIZE;
if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) {
- prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
if (!prop)
return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
@@ -400,13 +470,22 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
- bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_BEARER_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
@@ -467,37 +546,49 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
int err;
+ int len;
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK], NULL, NULL);
if (err)
return err;
if (!link[TIPC_NLA_LINK_PROP])
return -EINVAL;
- err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX,
- link[TIPC_NLA_LINK_PROP], NULL, NULL);
+ err = nla_parse_nested_deprecated(prop, TIPC_NLA_PROP_MAX,
+ link[TIPC_NLA_LINK_PROP], NULL,
+ NULL);
if (err)
return err;
if (!link[TIPC_NLA_LINK_STATS])
return -EINVAL;
- err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX,
- link[TIPC_NLA_LINK_STATS], NULL, NULL);
+ err = nla_parse_nested_deprecated(stats, TIPC_NLA_STATS_MAX,
+ link[TIPC_NLA_LINK_STATS], NULL,
+ NULL);
if (err)
return err;
name = (char *)TLV_DATA(msg->req);
+
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
- nla_data(link[TIPC_NLA_LINK_NAME]));
+ (char *)nla_data(link[TIPC_NLA_LINK_NAME]));
if (link[TIPC_NLA_LINK_BROADCAST]) {
__fill_bc_link_stat(msg, prop, stats);
@@ -597,14 +688,14 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(link, TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK], NULL, NULL);
if (err)
return err;
- link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+ link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
- nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
+ nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
TIPC_MAX_LINK_NAME);
return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
@@ -636,14 +727,14 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- media = nla_nest_start(skb, TIPC_NLA_MEDIA);
+ media = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA);
if (!media)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
- prop = nla_nest_start(skb, TIPC_NLA_MEDIA_PROP);
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_MEDIA_PROP);
if (!prop)
return -EMSGSIZE;
@@ -663,14 +754,14 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
+ bearer = nla_nest_start_noflag(skb, TIPC_NLA_BEARER);
if (!bearer)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
- prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_BEARER_PROP);
if (!prop)
return -EMSGSIZE;
@@ -690,14 +781,14 @@ static int __tipc_nl_compat_link_set(struct sk_buff *skb,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
- link = nla_nest_start(skb, TIPC_NLA_LINK);
+ link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
if (!link)
return -EMSGSIZE;
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
return -EMSGSIZE;
- prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP);
+ prop = nla_nest_start_noflag(skb, TIPC_NLA_LINK_PROP);
if (!prop)
return -EMSGSIZE;
@@ -715,18 +806,28 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = TLV_GET_DATA_LEN(msg->req);
+ len -= offsetof(struct tipc_link_config, name);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
- cmd->doit = &tipc_nl_media_set;
+ cmd->doit = &__tipc_nl_media_set;
return tipc_nl_compat_media_set(skb, msg);
}
bearer = tipc_bearer_find(msg->net, lc->name);
if (bearer) {
- cmd->doit = &tipc_nl_bearer_set;
+ cmd->doit = &__tipc_nl_bearer_set;
return tipc_nl_compat_bearer_set(skb, msg);
}
@@ -739,13 +840,22 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
- link = nla_nest_start(skb, TIPC_NLA_LINK);
+ link = nla_nest_start_noflag(skb, TIPC_NLA_LINK);
if (!link)
return -EMSGSIZE;
+ len = TLV_GET_DATA_LEN(msg->req);
+ if (len <= 0)
+ return -EINVAL;
+
+ len = min_t(int, len, TIPC_MAX_LINK_NAME);
+ if (!string_is_terminated(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
@@ -767,6 +877,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
@@ -794,16 +906,18 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_NAME_TABLE])
return -EINVAL;
- err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
- attrs[TIPC_NLA_NAME_TABLE], NULL, NULL);
+ err = nla_parse_nested_deprecated(nt, TIPC_NLA_NAME_TABLE_MAX,
+ attrs[TIPC_NLA_NAME_TABLE], NULL,
+ NULL);
if (err)
return err;
if (!nt[TIPC_NLA_NAME_TABLE_PUBL])
return -EINVAL;
- err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX,
- nt[TIPC_NLA_NAME_TABLE_PUBL], NULL, NULL);
+ err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX,
+ nt[TIPC_NLA_NAME_TABLE_PUBL], NULL,
+ NULL);
if (err)
return err;
@@ -862,8 +976,8 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_PUBL])
return -EINVAL;
- err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(publ, TIPC_NLA_PUBL_MAX,
+ attrs[TIPC_NLA_PUBL], NULL, NULL);
if (err)
return err;
@@ -893,8 +1007,12 @@ static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
TIPC_NL_PUBL_GET);
+ if (!hdr) {
+ kfree_skb(args);
+ return -EMSGSIZE;
+ }
- nest = nla_nest_start(args, TIPC_NLA_SOCK);
+ nest = nla_nest_start_noflag(args, TIPC_NLA_SOCK);
if (!nest) {
kfree_skb(args);
return -EMSGSIZE;
@@ -928,8 +1046,8 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_SOCK])
return -EINVAL;
- err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
+ attrs[TIPC_NLA_SOCK], NULL, NULL);
if (err)
return err;
@@ -940,8 +1058,12 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
u32 node;
struct nlattr *con[TIPC_NLA_CON_MAX + 1];
- nla_parse_nested(con, TIPC_NLA_CON_MAX,
- sock[TIPC_NLA_SOCK_CON], NULL, NULL);
+ err = nla_parse_nested_deprecated(con, TIPC_NLA_CON_MAX,
+ sock[TIPC_NLA_SOCK_CON],
+ NULL, NULL);
+
+ if (err)
+ return err;
node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
@@ -977,8 +1099,8 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_MEDIA])
return -EINVAL;
- err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX,
- attrs[TIPC_NLA_MEDIA], NULL, NULL);
+ err = nla_parse_nested_deprecated(media, TIPC_NLA_MEDIA_MAX,
+ attrs[TIPC_NLA_MEDIA], NULL, NULL);
if (err)
return err;
@@ -997,8 +1119,8 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_NODE])
return -EINVAL;
- err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(node, TIPC_NLA_NODE_MAX,
+ attrs[TIPC_NLA_NODE], NULL, NULL);
if (err)
return err;
@@ -1018,7 +1140,7 @@ static int tipc_nl_compat_net_set(struct tipc_nl_compat_cmd_doit *cmd,
val = ntohl(*(__be32 *)TLV_DATA(msg->req));
- net = nla_nest_start(skb, TIPC_NLA_NET);
+ net = nla_nest_start_noflag(skb, TIPC_NLA_NET);
if (!net)
return -EMSGSIZE;
@@ -1044,8 +1166,8 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
if (!attrs[TIPC_NLA_NET])
return -EINVAL;
- err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET],
- NULL, NULL);
+ err = nla_parse_nested_deprecated(net, TIPC_NLA_NET_MAX,
+ attrs[TIPC_NLA_NET], NULL, NULL);
if (err)
return err;
@@ -1087,12 +1209,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_ENABLE_BEARER:
msg->req_type = TIPC_TLV_BEARER_CONFIG;
- doit.doit = tipc_nl_bearer_enable;
+ doit.doit = __tipc_nl_bearer_enable;
doit.transcode = tipc_nl_compat_bearer_enable;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_DISABLE_BEARER:
msg->req_type = TIPC_TLV_BEARER_NAME;
- doit.doit = tipc_nl_bearer_disable;
+ doit.doit = __tipc_nl_bearer_disable;
doit.transcode = tipc_nl_compat_bearer_disable;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_SHOW_LINK_STATS:
@@ -1146,12 +1268,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
return tipc_nl_compat_dumpit(&dump, msg);
case TIPC_CMD_SET_NODE_ADDR:
msg->req_type = TIPC_TLV_NET_ADDR;
- doit.doit = tipc_nl_net_set;
+ doit.doit = __tipc_nl_net_set;
doit.transcode = tipc_nl_compat_net_set;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_SET_NETID:
msg->req_type = TIPC_TLV_UNSIGNED;
- doit.doit = tipc_nl_net_set;
+ doit.doit = __tipc_nl_net_set;
doit.transcode = tipc_nl_compat_net_set;
return tipc_nl_compat_doit(&doit, msg);
case TIPC_CMD_GET_NETID:
@@ -1173,7 +1295,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
struct tipc_nl_compat_msg msg;
struct nlmsghdr *req_nlh;
struct nlmsghdr *rep_nlh;
- struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+ struct tipc_genlmsghdr *req_userhdr = genl_info_userhdr(info);
memset(&msg, 0, sizeof(msg));
@@ -1189,8 +1311,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
goto send;
}
- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
@@ -1215,9 +1337,10 @@ send:
return err;
}
-static struct genl_ops tipc_genl_compat_ops[] = {
+static const struct genl_small_ops tipc_genl_compat_ops[] = {
{
.cmd = TIPC_GENL_CMD,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = tipc_nl_compat_recv,
},
};
@@ -1229,8 +1352,9 @@ static struct genl_family tipc_genl_compat_family __ro_after_init = {
.maxattr = 0,
.netnsok = true,
.module = THIS_MODULE,
- .ops = tipc_genl_compat_ops,
- .n_ops = ARRAY_SIZE(tipc_genl_compat_ops),
+ .small_ops = tipc_genl_compat_ops,
+ .n_small_ops = ARRAY_SIZE(tipc_genl_compat_ops),
+ .resv_start_op = TIPC_GENL_CMD + 1,
};
int __init tipc_netlink_compat_start(void)
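
A recurring fix in the compat transcoders above: the payload length claimed by a legacy TLV is bounded with TLV_GET_DATA_LEN(), clamped to the maximum name length, and the embedded string must be NUL-terminated within that bound, otherwise a crafted request could make nla_put_string() read past the buffer. string_is_terminated() from <linux/string_helpers.h> is essentially a memchr() test, so the check is easy to reproduce in a userspace sketch (check_name() is a hypothetical condensation of the bearer/link-name validation):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the kernel's string_is_terminated(): true if a NUL occurs
 * within the first len bytes of s.
 */
static bool string_is_terminated(const char *s, int len)
{
	return len > 0 && memchr(s, '\0', len) != NULL;
}

/* Bound the claimed length, clamp it, then require termination. */
static int check_name(const char *payload, int data_len, int max_name)
{
	int len = data_len;

	if (len <= 0)
		return -1;		/* kernel: -EINVAL */
	if (len > max_name)
		len = max_name;		/* kernel: min_t() */
	if (!string_is_terminated(payload, len))
		return -1;
	return 0;
}

int main(void)
{
	char good[8] = "eth0";
	char bad[8] = { 'e', 't', 'h', '0', 'X', 'X', 'X', 'X' };

	printf("good: %d\n", check_name(good, sizeof(good), 8));
	printf("bad:  %d\n", check_name(bad, sizeof(bad), 8));
	return 0;
}
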
diff --git a/net/tipc/node.c b/net/tipc/node.c
index aeef8011ac7d..a07fb073368c 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -43,8 +43,11 @@
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
+#include "trace.h"
+#include "crypto.h"
#define INVALID_NODE_SIG 0x10000
+#define NODE_CLEANUP_AFTER 300000
/* Flags used to take different actions according to flag type
* TIPC_NOTIFY_NODE_DOWN: notify node is down
@@ -72,21 +75,24 @@ struct tipc_bclink_entry {
struct sk_buff_head arrvq;
struct sk_buff_head inputq2;
struct sk_buff_head namedq;
+ u16 named_rcv_nxt;
+ bool named_open;
};
/**
* struct tipc_node - TIPC node structure
* @addr: network address of node
- * @ref: reference counter to node object
+ * @kref: reference counter to node object
* @lock: rwlock governing access to structure
* @net: the applicable net namespace
* @hash: links to adjacent nodes in unsorted hash chain
- * @inputq: pointer to input queue containing messages for msg event
- * @namedq: pointer to name table input queue with name table messages
* @active_links: bearer ids of active links, used as index into links[] array
* @links: array containing references to all links to node
+ * @bc_entry: broadcast link entry
* @action_flags: bit mask of different types of node actions
* @state: connectivity state vs peer node
+ * @preliminary: a preliminary node or not
+ * @failover_sent: failover sent or not
* @sync_point: sequence number where synch/failover is finished
* @list: links to adjacent nodes in sorted list of cluster's nodes
* @working_links: number of working links to node (both active and standby)
@@ -94,8 +100,17 @@ struct tipc_bclink_entry {
* @capabilities: bitmap, indicating peer node's functional capabilities
* @signature: node instance identifier
* @link_id: local and remote bearer ids of changing link, if any
+ * @peer_id: 128-bit ID of peer
+ * @peer_id_string: ID string of peer
* @publ_list: list of publications
+ * @conn_sks: list of connections (FIXME)
+ * @timer: node's keepalive timer
+ * @keepalive_intv: keepalive interval in milliseconds
* @rcu: rcu struct for tipc_node
+ * @delete_at: indicates the time for deleting a down node
+ * @peer_net: peer's net namespace
+ * @peer_hash_mix: hash for this peer (FIXME)
+ * @crypto_rx: RX crypto handler
*/
struct tipc_node {
u32 addr;
@@ -109,17 +124,27 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool preliminary;
+ bool failover_sent;
u16 sync_point;
int link_cnt;
u16 working_links;
u16 capabilities;
u32 signature;
u32 link_id;
+ u8 peer_id[16];
+ char peer_id_string[NODE_ID_STR_LEN];
struct list_head publ_list;
struct list_head conn_sks;
unsigned long keepalive_intv;
struct timer_list timer;
struct rcu_head rcu;
+ unsigned long delete_at;
+ struct net *peer_net;
+ u32 peer_hash_mix;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_crypto *crypto_rx;
+#endif
};
/* Node FSM states and events:
@@ -153,11 +178,12 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
-static void tipc_node_timeout(unsigned long data);
+static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
-static void tipc_node_put(struct tipc_node *node);
-static bool tipc_node_is_up(struct tipc_node *n);
+static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
+static bool node_is_up(struct tipc_node *n);
+static void tipc_node_delete_from_list(struct tipc_node *node);
struct tipc_sock_conn {
u32 port;
@@ -176,7 +202,7 @@ static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
return n->links[bearer_id].link;
}
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
struct tipc_node *n;
int bearer_id;
@@ -186,6 +212,14 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
if (unlikely(!n))
return mtu;
+ /* Allow MAX_MSG_SIZE when building a connection-oriented message,
+ * if both ends are in the same core network
+ */
+ if (n->peer_net && connected) {
+ tipc_node_put(n);
+ return mtu;
+ }
+
bearer_id = n->active_links[sel & 1];
if (likely(bearer_id != INVALID_BEARER_ID))
mtu = n->links[bearer_id].mtu;
@@ -193,6 +227,27 @@ int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
return mtu;
}
+bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
+{
+ u8 *own_id = tipc_own_id(net);
+ struct tipc_node *n;
+
+ if (!own_id)
+ return true;
+
+ if (addr == tipc_own_addr(net)) {
+ memcpy(id, own_id, TIPC_NODEID_LEN);
+ return true;
+ }
+ n = tipc_node_find(net, addr);
+ if (!n)
+ return false;
+
+ memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
+ tipc_node_put(n);
+ return true;
+}
+
u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
struct tipc_node *n;
@@ -206,20 +261,65 @@ u16 tipc_node_get_capabilities(struct net *net, u32 addr)
return caps;
}
+u32 tipc_node_get_addr(struct tipc_node *node)
+{
+ return (node) ? node->addr : 0;
+}
+
+char *tipc_node_get_id_str(struct tipc_node *node)
+{
+ return node->peer_id_string;
+}
+
+#ifdef CONFIG_TIPC_CRYPTO
+/**
+ * tipc_node_crypto_rx - Retrieve crypto RX handle from node
+ * @__n: target tipc_node
+ * Note: node ref counter must be held first!
+ */
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
+{
+ return (__n) ? __n->crypto_rx : NULL;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
+{
+ return container_of(pos, struct tipc_node, list)->crypto_rx;
+}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ n = tipc_node_find(net, addr);
+ return (n) ? n->crypto_rx : NULL;
+}
+#endif
+
+static void tipc_node_free(struct rcu_head *rp)
+{
+ struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
+
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_stop(&n->crypto_rx);
+#endif
+ kfree(n);
+}
+
static void tipc_node_kref_release(struct kref *kref)
{
struct tipc_node *n = container_of(kref, struct tipc_node, kref);
kfree(n->bc_entry.link);
- kfree_rcu(n, rcu);
+ call_rcu(&n->rcu, tipc_node_free);
}
-static void tipc_node_put(struct tipc_node *node)
+void tipc_node_put(struct tipc_node *node)
{
kref_put(&node->kref, tipc_node_kref_release);
}
-static void tipc_node_get(struct tipc_node *node)
+void tipc_node_get(struct tipc_node *node)
{
kref_get(&node->kref);
}
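[Editor's note] The release path above moves from kfree_rcu() to call_rcu() with an explicit callback, so that crypto teardown can also run after the RCU grace period rather than only the kfree(). The underlying kref-plus-RCU lifetime pattern, sketched with hypothetical names:

#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	struct kref kref;
	struct rcu_head rcu;
};

static void demo_node_free(struct rcu_head *rp)
{
	struct demo_node *n = container_of(rp, struct demo_node, rcu);

	/* Runs after a grace period: no RCU reader can still see n,
	 * so extra teardown (e.g. stopping crypto) is safe here too.
	 */
	kfree(n);
}

static void demo_node_release(struct kref *kref)
{
	struct demo_node *n = container_of(kref, struct demo_node, kref);

	call_rcu(&n->rcu, demo_node_free);
}

static void demo_node_put(struct demo_node *n)
{
	kref_put(&n->kref, demo_node_release);
}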
@@ -233,12 +333,9 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
struct tipc_node *node;
unsigned int thash = tipc_hashfn(addr);
- if (unlikely(!in_own_cluster_exact(net, addr)))
- return NULL;
-
rcu_read_lock();
hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
- if (node->addr != addr)
+ if (node->addr != addr || node->preliminary)
continue;
if (!kref_get_unless_zero(&node->kref))
node = NULL;
@@ -248,43 +345,75 @@ static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
return node;
}
+/* tipc_node_find_by_id - locate specified node object by its 128-bit id
+ * Note: this function is called only when a discovery request failed
+ * to find the node by its 32-bit id, and is not time critical
+ */
+static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ read_lock_bh(&n->lock);
+ if (!memcmp(id, n->peer_id, 16) &&
+ kref_get_unless_zero(&n->kref))
+ found = true;
+ read_unlock_bh(&n->lock);
+ if (found)
+ break;
+ }
+ rcu_read_unlock();
+ return found ? n : NULL;
+}
+
static void tipc_node_read_lock(struct tipc_node *n)
+ __acquires(n->lock)
{
read_lock_bh(&n->lock);
}
static void tipc_node_read_unlock(struct tipc_node *n)
+ __releases(n->lock)
{
read_unlock_bh(&n->lock);
}
static void tipc_node_write_lock(struct tipc_node *n)
+ __acquires(n->lock)
{
write_lock_bh(&n->lock);
}
static void tipc_node_write_unlock_fast(struct tipc_node *n)
+ __releases(n->lock)
{
write_unlock_bh(&n->lock);
}
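[Editor's note] The __acquires()/__releases() annotations being added to these wrappers compile to nothing at runtime; they exist so sparse (make C=1) can check lock balance across helpers that acquire in one function and release in another. A tiny annotated pair, assuming a demo rwlock:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);

static void demo_read_lock(void)
	__acquires(demo_lock)
{
	read_lock_bh(&demo_lock);
}

static void demo_read_unlock(void)
	__releases(demo_lock)
{
	read_unlock_bh(&demo_lock);
}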
static void tipc_node_write_unlock(struct tipc_node *n)
+ __releases(n->lock)
{
+ struct tipc_socket_addr sk;
struct net *net = n->net;
- u32 addr = 0;
u32 flags = n->action_flags;
- u32 link_id = 0;
- u32 bearer_id;
struct list_head *publ_list;
+ struct tipc_uaddr ua;
+ u32 bearer_id, node;
if (likely(!flags)) {
write_unlock_bh(&n->lock);
return;
}
- addr = n->addr;
- link_id = n->link_id;
- bearer_id = link_id & 0xffff;
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
+ TIPC_LINK_STATE, n->addr, n->addr);
+ sk.ref = n->link_id;
+ sk.node = tipc_own_addr(net);
+ node = n->addr;
+ bearer_id = n->link_id & 0xffff;
publ_list = &n->publ_list;
n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
@@ -293,34 +422,117 @@ static void tipc_node_write_unlock(struct tipc_node *n)
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, addr);
+ tipc_publ_notify(net, publ_list, node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, addr);
+ tipc_named_node_up(net, node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, addr, bearer_id);
- tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
- TIPC_NODE_SCOPE, link_id, addr);
+ tipc_mon_peer_up(net, node, bearer_id);
+ tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, addr, bearer_id);
- tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
- link_id, addr);
+ tipc_mon_peer_down(net, node, bearer_id);
+ tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
-struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
+static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
+{
+ int net_id = tipc_netid(n->net);
+ struct tipc_net *tn_peer;
+ struct net *tmp;
+ u32 hash_chk;
+
+ if (n->peer_net)
+ return;
+
+ for_each_net_rcu(tmp) {
+ tn_peer = tipc_net(tmp);
+ if (!tn_peer)
+ continue;
+ /* Integrity check: does this node really exist in the namespace? */
+ if (tn_peer->net_id != net_id)
+ continue;
+ if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
+ continue;
+ hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
+ if (hash_mixes ^ hash_chk)
+ continue;
+ n->peer_net = tmp;
+ n->peer_hash_mix = hash_mixes;
+ break;
+ }
+}
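[Editor's note] tipc_node_assign_peer_net() above walks every live network namespace and only accepts a peer as a local container when three independent values match: the net id, the 128-bit node id, and the random hash mix. A sketch of the namespace walk; demo_match() stands in for that triple check and is hypothetical:

#include <net/net_namespace.h>

static bool demo_match(struct net *net, u32 want);	/* hypothetical */

static struct net *demo_find_peer_ns(u32 want)
{
	struct net *tmp, *found = NULL;

	/* Caller is assumed to hold an RCU read-side lock */
	for_each_net_rcu(tmp) {
		if (demo_match(tmp, want)) {
			found = tmp;
			break;
		}
	}
	return found;	/* only stable while RCU-protected */
}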
+
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_link *l, *snd_l = tipc_bc_sndlink(net);
struct tipc_node *n, *temp_node;
+ unsigned long intv;
+ int bearer_id;
int i;
spin_lock_bh(&tn->node_list_lock);
- n = tipc_node_find(net, addr);
+ n = tipc_node_find(net, addr) ?:
+ tipc_node_find_by_id(net, peer_id);
if (n) {
+ if (!n->preliminary)
+ goto update;
+ if (preliminary)
+ goto exit;
+ /* A preliminary node becomes "real" now, refresh its data */
+ tipc_node_write_lock(n);
+ if (!tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+ tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+ n->capabilities, &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
+ pr_warn("Broadcast rcv link refresh failed, no memory\n");
+ tipc_node_write_unlock_fast(n);
+ tipc_node_put(n);
+ n = NULL;
+ goto exit;
+ }
+ n->preliminary = false;
+ n->addr = addr;
+ hlist_del_rcu(&n->hash);
+ hlist_add_head_rcu(&n->hash,
+ &tn->node_htable[tipc_hashfn(addr)]);
+ list_del_rcu(&n->list);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
+ tipc_node_write_unlock_fast(n);
+
+update:
+ if (n->peer_hash_mix ^ hash_mixes)
+ tipc_node_assign_peer_net(n, hash_mixes);
+ if (n->capabilities == capabilities)
+ goto exit;
/* Same node may come back with new capabilities */
+ tipc_node_write_lock(n);
n->capabilities = capabilities;
+ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ l = n->links[bearer_id].link;
+ if (l)
+ tipc_link_update_caps(l, capabilities);
+ }
+ tipc_node_write_unlock_fast(n);
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+
+ tipc_bcast_toggle_rcast(net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
+
goto exit;
}
n = kzalloc(sizeof(*n), GFP_ATOMIC);
@@ -328,8 +540,23 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
pr_warn("Node creation failed, no memory\n");
goto exit;
}
+ tipc_nodeid2string(n->peer_id_string, peer_id);
+#ifdef CONFIG_TIPC_CRYPTO
+ if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
+ pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
+ kfree(n);
+ n = NULL;
+ goto exit;
+ }
+#endif
n->addr = addr;
+ n->preliminary = preliminary;
+ memcpy(&n->peer_id, peer_id, 16);
n->net = net;
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ /* Assign the kernel-local namespace, if one exists */
+ tipc_node_assign_peer_net(n, hash_mixes);
n->capabilities = capabilities;
kref_init(&n->kref);
rwlock_init(&n->lock);
@@ -344,31 +571,40 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
for (i = 0; i < MAX_BEARERS; i++)
spin_lock_init(&n->links[i].lock);
n->state = SELF_DOWN_PEER_LEAVING;
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
n->active_links[1] = INVALID_BEARER_ID;
- if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
- U16_MAX,
- tipc_link_window(tipc_bc_sndlink(net)),
- n->capabilities,
- &n->bc_entry.inputq1,
- &n->bc_entry.namedq,
- tipc_bc_sndlink(net),
- &n->bc_entry.link)) {
+ if (!preliminary &&
+ !tipc_link_bc_create(net, tipc_own_addr(net), addr, peer_id, U16_MAX,
+ tipc_link_min_win(snd_l), tipc_link_max_win(snd_l),
+ n->capabilities, &n->bc_entry.inputq1,
+ &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) {
pr_warn("Broadcast rcv link creation failed, no memory\n");
- kfree(n);
+ tipc_node_put(n);
n = NULL;
goto exit;
}
tipc_node_get(n);
- setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
- n->keepalive_intv = U32_MAX;
+ timer_setup(&n->timer, tipc_node_timeout, 0);
+ /* Start a slow timer anyway, crypto needs it */
+ n->keepalive_intv = 10000;
+ intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
+ if (!mod_timer(&n->timer, intv))
+ tipc_node_get(n);
hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
if (n->addr < temp_node->addr)
break;
}
list_add_tail_rcu(&n->list, &temp_node->list);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
+ trace_tipc_node_create(n, true, " ");
exit:
spin_unlock_bh(&tn->node_list_lock);
return n;
@@ -387,13 +623,22 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}
-static void tipc_node_delete(struct tipc_node *node)
+static void tipc_node_delete_from_list(struct tipc_node *node)
{
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_key_flush(node->crypto_rx);
+#endif
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
tipc_node_put(node);
+}
- del_timer_sync(&node->timer);
+static void tipc_node_delete(struct tipc_node *node)
+{
+ trace_tipc_node_delete(node, true, " ");
+ tipc_node_delete_from_list(node);
+
+ timer_delete_sync(&node->timer);
tipc_node_put(node);
}
@@ -498,30 +743,102 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
tipc_node_put(node);
}
+static void tipc_node_clear_links(struct tipc_node *node)
+{
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ struct tipc_link_entry *le = &node->links[i];
+
+ if (le->link) {
+ kfree(le->link);
+ le->link = NULL;
+ node->link_cnt--;
+ }
+ }
+}
+
+/* tipc_node_cleanup - delete nodes that have had no
+ * active links for NODE_CLEANUP_AFTER time
+ */
+static bool tipc_node_cleanup(struct tipc_node *peer)
+{
+ struct tipc_node *temp_node;
+ struct tipc_net *tn = tipc_net(peer->net);
+ bool deleted = false;
+
+ /* If the lock is held by tipc_node_stop(), the node will be deleted anyway */
+ if (!spin_trylock_bh(&tn->node_list_lock))
+ return false;
+
+ tipc_node_write_lock(peer);
+
+ if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
+ tipc_node_clear_links(peer);
+ tipc_node_delete_from_list(peer);
+ deleted = true;
+ }
+ tipc_node_write_unlock(peer);
+
+ if (!deleted) {
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+ }
+
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(peer->net,
+ (tn->capabilities & TIPC_BCAST_RCAST));
+ spin_unlock_bh(&tn->node_list_lock);
+ return deleted;
+}
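[Editor's note] The cleanup test compares jiffies against the stored delete_at stamp with time_after(), which remains correct across jiffies wrap-around where a plain ">" would not. The idiom in isolation, with hypothetical names:

#include <linux/jiffies.h>

#define DEMO_CLEANUP_AFTER_MS 300000	/* mirrors NODE_CLEANUP_AFTER */

struct demo_peer {
	unsigned long delete_at;
};

static void demo_touch(struct demo_peer *p)
{
	/* Re-arm the deadline whenever contact is lost */
	p->delete_at = jiffies + msecs_to_jiffies(DEMO_CLEANUP_AFTER_MS);
}

static bool demo_expired(const struct demo_peer *p)
{
	/* Wrap-safe: expands to a signed difference comparison */
	return time_after(jiffies, p->delete_at);
}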
+
/* tipc_node_timeout - handle expiration of node timer
*/
-static void tipc_node_timeout(unsigned long data)
+static void tipc_node_timeout(struct timer_list *t)
{
- struct tipc_node *n = (struct tipc_node *)data;
+ struct tipc_node *n = timer_container_of(n, t, timer);
struct tipc_link_entry *le;
struct sk_buff_head xmitq;
+ int remains = n->link_cnt;
int bearer_id;
int rc = 0;
+ trace_tipc_node_timeout(n, false, " ");
+ if (!node_is_up(n) && tipc_node_cleanup(n)) {
+ /* Drop the timer's reference to the node */
+ tipc_node_put(n);
+ return;
+ }
+
+#ifdef CONFIG_TIPC_CRYPTO
+ /* Take any crypto key related actions first */
+ tipc_crypto_timeout(n->crypto_rx);
+#endif
__skb_queue_head_init(&xmitq);
- for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+ /* Reset the node interval to a larger initial value (10 seconds);
+ * it will then be recalculated from the lowest link tolerance
+ */
+ tipc_node_read_lock(n);
+ n->keepalive_intv = 10000;
+ tipc_node_read_unlock(n);
+ for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
tipc_node_read_lock(n);
le = &n->links[bearer_id];
- spin_lock_bh(&le->lock);
if (le->link) {
+ spin_lock_bh(&le->lock);
/* Link tolerance may change asynchronously: */
tipc_node_calculate_timer(n, le->link);
rc = tipc_link_timeout(le->link, &xmitq);
+ spin_unlock_bh(&le->lock);
+ remains--;
}
- spin_unlock_bh(&le->lock);
tipc_node_read_unlock(n);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
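[Editor's note] The timeout handler above reflects the timer-API conversion: timer_setup() replaces setup_timer() with its unsigned long cookie, and the enclosing object is recovered type-safely with timer_container_of() instead of a cast. A self-rearming sketch under those assumptions:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_tnode {
	struct timer_list timer;
	unsigned long keepalive_ms;
};

static void demo_timeout(struct timer_list *t)
{
	/* Recover the enclosing object from its timer member */
	struct demo_tnode *p = timer_container_of(p, t, timer);

	/* ... periodic work ... */

	mod_timer(&p->timer, jiffies + msecs_to_jiffies(p->keepalive_ms));
}

static void demo_start(struct demo_tnode *p)
{
	timer_setup(&p->timer, demo_timeout, 0);
	p->keepalive_ms = 10000;	/* slow initial interval */
	mod_timer(&p->timer, jiffies + msecs_to_jiffies(p->keepalive_ms));
}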
@@ -530,6 +847,9 @@ static void tipc_node_timeout(unsigned long data)
/**
* __tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
* Node lock must be held by caller
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
@@ -553,13 +873,14 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
n->link_id = tipc_link_id(nl);
/* Leave room for tunnel header when returning 'mtu' to users: */
- n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
+ n->links[bearer_id].mtu = tipc_link_mss(nl);
tipc_bearer_add_dest(n->net, bearer_id, n->addr);
tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
pr_debug("Established link <%s> on network plane %c\n",
tipc_link_name(nl), tipc_link_plane(nl));
+ trace_tipc_node_link_up(n, true, " ");
/* Ensure that a STATE message goes first */
tipc_link_build_state_msg(nl, xmitq);
@@ -595,6 +916,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
/**
* tipc_node_link_up - handle addition of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
*
* Link becomes active (alone or shared) or standby, depending on its priority.
*/
@@ -606,12 +930,60 @@ static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
tipc_node_write_lock(n);
__tipc_node_link_up(n, bearer_id, xmitq);
maddr = &n->links[bearer_id].maddr;
- tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
+ tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
tipc_node_write_unlock(n);
}
/**
+ * tipc_node_link_failover() - start failover in case "half-failover"
+ *
+ * This function is only called in a very special situation where link
+ * failover can be already started on peer node but not on this node.
+ * This can happen when e.g.::
+ *
+ * 1. Both links <1A-2A>, <1B-2B> down
+ * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
+ * disturbance, wrong session, etc.)
+ * 3. Link <1B-2B> up
+ * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
+ * 5. Node 2 starts failover onto link <1B-2B>
+ *
+ * ==> Node 1 never starts link/node failover!
+ *
+ * @n: tipc node structure
+ * @l: peer link endpoint that is failing over (can be NULL)
+ * @tnl: tunnel link
+ * @xmitq: queue for messages to be xmited on tnl link later
+ */
+static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
+ struct tipc_link *tnl,
+ struct sk_buff_head *xmitq)
+{
+ /* Avoid a "self-failover", which could never end */
+ if (!tipc_link_is_up(tnl))
+ return;
+
+ /* Don't rush; the failed link may still be resetting */
+ if (l && !tipc_link_is_reset(l))
+ return;
+
+ tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
+ tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+
+ n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
+ tipc_link_failover_prepare(l, tnl, xmitq);
+
+ if (l)
+ tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+}
+
+/**
* __tipc_node_link_down - handle loss of link
+ * @n: target tipc_node
+ * @bearer_id: id of the bearer
+ * @xmitq: queue for messages to be xmited on
+ * @maddr: output media address of the bearer
*/
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
struct sk_buff_head *xmitq,
@@ -657,10 +1029,11 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
*slot1 = i;
}
- if (!tipc_node_is_up(n)) {
+ if (!node_is_up(n)) {
if (tipc_link_peer_is_down(l))
tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_reset(l);
tipc_link_build_reset_msg(l, xmitq);
@@ -678,6 +1051,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
@@ -688,10 +1062,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
struct tipc_link_entry *le = &n->links[bearer_id];
+ struct tipc_media_addr *maddr = NULL;
struct tipc_link *l = le->link;
- struct tipc_media_addr *maddr;
- struct sk_buff_head xmitq;
int old_bearer_id = bearer_id;
+ struct sk_buff_head xmitq;
if (!l)
return;
@@ -701,30 +1075,99 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
tipc_node_write_lock(n);
if (!tipc_link_is_establishing(l)) {
__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
- if (delete) {
- kfree(l);
- le->link = NULL;
- n->link_cnt--;
- }
} else {
/* Defuse pending tipc_node_link_up() */
+ tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
}
+ if (delete) {
+ kfree(l);
+ le->link = NULL;
+ n->link_cnt--;
+ }
+ trace_tipc_node_link_down(n, true, "node link down or deleted!");
tipc_node_write_unlock(n);
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ if (!skb_queue_empty(&xmitq))
+ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
tipc_sk_rcv(n->net, &le->inputq);
}
-static bool tipc_node_is_up(struct tipc_node *n)
+static bool node_is_up(struct tipc_node *n)
{
return n->active_links[0] != INVALID_BEARER_ID;
}
-void tipc_node_check_dest(struct net *net, u32 onode,
- struct tipc_bearer *b,
- u16 capabilities, u32 signature,
+bool tipc_node_is_up(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+ bool retval = false;
+
+ if (in_own_node(net, addr))
+ return true;
+
+ n = tipc_node_find(net, addr);
+ if (!n)
+ return false;
+ retval = node_is_up(n);
+ tipc_node_put(n);
+ return retval;
+}
+
+static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ addr ^= tipc_net(net)->random;
+ while ((n = tipc_node_find(net, addr))) {
+ tipc_node_put(n);
+ addr++;
+ }
+ return addr;
+}
+
+/* tipc_node_try_addr(): Check if addr can be used by peer, suggest another if not
+ * Returns suggested address if any, otherwise 0
+ */
+u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
+{
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+ bool preliminary;
+ u32 sugg_addr;
+
+ /* Suggest new address if some other peer is using this one */
+ n = tipc_node_find(net, addr);
+ if (n) {
+ if (!memcmp(n->peer_id, id, NODE_ID_LEN))
+ addr = 0;
+ tipc_node_put(n);
+ if (!addr)
+ return 0;
+ return tipc_node_suggest_addr(net, addr);
+ }
+
+ /* Suggest previously used address if peer is known */
+ n = tipc_node_find_by_id(net, id);
+ if (n) {
+ sugg_addr = n->addr;
+ preliminary = n->preliminary;
+ tipc_node_put(n);
+ if (!preliminary)
+ return sugg_addr;
+ }
+
+ /* Even this node may be in conflict */
+ if (tn->trial_addr == addr)
+ return tipc_node_suggest_addr(net, addr);
+
+ return 0;
+}
+
+void tipc_node_check_dest(struct net *net, u32 addr,
+ u8 *peer_id, struct tipc_bearer *b,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr)
{
@@ -734,15 +1177,18 @@ void tipc_node_check_dest(struct net *net, u32 onode,
bool addr_match = false;
bool sign_match = false;
bool link_up = false;
+ bool link_is_reset = false;
bool accept_addr = false;
- bool reset = true;
+ bool reset = false;
char *if_name;
unsigned long intv;
+ u16 session;
*dupl_addr = false;
*respond = false;
- n = tipc_node_create(net, onode, capabilities);
+ n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
+ false);
if (!n)
return;
@@ -753,14 +1199,17 @@ void tipc_node_check_dest(struct net *net, u32 onode,
/* Prepare to validate requesting node's signature and media address */
l = le->link;
link_up = l && tipc_link_is_up(l);
+ link_is_reset = l && tipc_link_is_reset(l);
addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
sign_match = (signature == n->signature);
/* These three flags give us eight permutations: */
if (sign_match && addr_match && link_up) {
- /* All is fine. Do nothing. */
- reset = false;
+ /* All is fine. Ignore requests. */
+ /* Peer node is not a container/local namespace */
+ if (!n->peer_hash_mix)
+ n->peer_hash_mix = hash_mixes;
} else if (sign_match && addr_match && !link_up) {
/* Respond. The link will come up in due time */
*respond = true;
@@ -768,7 +1217,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
/* Peer has changed i/f address without rebooting.
* If so, the link will reset soon, and the next
* discovery will be accepted. So we can ignore it.
- * It may also be an cloned or malicious peer having
+ * It may also be a cloned or malicious peer having
* chosen the same node address and signature as an
* existing one.
* Ignore requests until the link goes down, if ever.
@@ -782,6 +1231,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
*/
accept_addr = true;
*respond = true;
+ reset = true;
} else if (!sign_match && addr_match && link_up) {
/* Peer node rebooted. Two possibilities:
* - Delayed re-discovery; this link endpoint has already
@@ -813,6 +1263,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
n->signature = signature;
accept_addr = true;
*respond = true;
+ reset = true;
}
if (!accept_addr)
@@ -820,15 +1271,15 @@ void tipc_node_check_dest(struct net *net, u32 onode,
/* Now create new link if not already existing */
if (!l) {
- if (n->link_cnt == 2) {
- pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+ if (n->link_cnt == 2)
goto exit;
- }
+
if_name = strchr(b->name, ':') + 1;
+ get_random_bytes(&session, sizeof(u16));
if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
b->net_plane, b->mtu, b->priority,
- b->window, mod(tipc_net(net)->random),
- tipc_own_addr(net), onode,
+ b->min_win, b->max_win, session,
+ tipc_own_addr(net), addr, peer_id,
n->capabilities,
tipc_bc_sndlink(n->net), n->bc_entry.link,
&le->inputq,
@@ -836,10 +1287,12 @@ void tipc_node_check_dest(struct net *net, u32 onode,
*respond = false;
goto exit;
}
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
if (n->state == NODE_FAILINGOVER)
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+ link_is_reset = tipc_link_is_reset(l);
le->link = l;
n->link_cnt++;
tipc_node_calculate_timer(n, l);
@@ -852,7 +1305,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
tipc_node_write_unlock(n);
- if (reset && l && !tipc_link_is_reset(l))
+ if (reset && !link_is_reset)
tipc_node_link_down(n, b->identity, false);
tipc_node_put(n);
}
@@ -871,12 +1324,11 @@ void tipc_node_delete_links(struct net *net, int bearer_id)
static void tipc_node_reset_links(struct tipc_node *n)
{
- char addr_string[16];
int i;
- pr_warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, n->addr));
+ pr_warn("Resetting all links to %x\n", n->addr);
+ trace_tipc_node_reset_links(n, true, " ");
for (i = 0; i < MAX_BEARERS; i++) {
tipc_node_link_down(n, i, false);
}
@@ -1052,28 +1504,31 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
pr_err("Unknown node fsm state %x\n", state);
break;
}
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
n->state = state;
return;
illegal_evt:
pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
static void node_lost_contact(struct tipc_node *n,
struct sk_buff_head *inputq)
{
- char addr_string[16];
struct tipc_sock_conn *conn, *safe;
struct tipc_link *l;
struct list_head *conns = &n->conn_sks;
struct sk_buff *skb;
uint i;
- pr_debug("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n->addr));
+ pr_debug("Lost contact with %x\n", n->addr);
+ n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
+ trace_tipc_node_lost_contact(n, true, " ");
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+ skb_queue_purge(&n->bc_entry.namedq);
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
@@ -1084,7 +1539,8 @@ static void node_lost_contact(struct tipc_node *n,
/* Notify publications from this node */
n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
-
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
/* Notify sockets connected to node */
list_for_each_entry_safe(conn, safe, conns, list) {
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
@@ -1101,11 +1557,13 @@ static void node_lost_contact(struct tipc_node *n,
/**
* tipc_node_get_linkname - get the name of a link
*
+ * @net: the applicable net namespace
* @bearer_id: id of the bearer
- * @node: peer node address
+ * @addr: peer node address
* @linkname: link name output buffer
+ * @len: size of @linkname output buffer
*
- * Returns 0 on success
+ * Return: 0 on success
*/
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
char *linkname, size_t len)
@@ -1123,11 +1581,11 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (link) {
- strncpy(linkname, tipc_link_name(link), len);
+ strscpy(linkname, tipc_link_name(link), len);
err = 0;
}
-exit:
tipc_node_read_unlock(node);
+exit:
tipc_node_put(node);
return err;
}
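[Editor's note] The strncpy() to strscpy() switch above is a correctness fix: strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy() always NUL-terminates and reports truncation. A minimal sketch:

#include <linux/string.h>
#include <linux/printk.h>

static void demo_copy_name(char *dst, size_t dstlen, const char *src)
{
	/* strscpy() NUL-terminates and returns -E2BIG on truncation */
	if (strscpy(dst, src, dstlen) < 0)
		pr_debug("name truncated\n");
}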
@@ -1143,13 +1601,13 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
if (!attrs)
goto msg_full;
if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
goto attr_msg_full;
- if (tipc_node_is_up(node))
+ if (node_is_up(node))
if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
goto attr_msg_full;
@@ -1166,14 +1624,65 @@ msg_full:
return -EMSGSIZE;
}
+static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
+{
+ struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ struct sk_buff_head inputq;
+
+ switch (msg_user(hdr)) {
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ if (msg_connected(hdr) || msg_named(hdr) ||
+ msg_direct(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ }
+ if (msg_mcast(hdr)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ return;
+ }
+ return;
+ case MSG_FRAGMENTER:
+ if (tipc_msg_assemble(list)) {
+ tipc_loopback_trace(peer_net, list);
+ skb_queue_head_init(&inputq);
+ tipc_sk_mcast_rcv(peer_net, list, &inputq);
+ __skb_queue_purge(list);
+ skb_queue_purge(&inputq);
+ }
+ return;
+ case GROUP_PROTOCOL:
+ case CONN_MANAGER:
+ tipc_loopback_trace(peer_net, list);
+ spin_lock_init(&list->lock);
+ tipc_sk_rcv(peer_net, list);
+ return;
+ case LINK_PROTOCOL:
+ case NAME_DISTRIBUTOR:
+ case TUNNEL_PROTOCOL:
+ case BCAST_PROTOCOL:
+ return;
+ default:
+ return;
+ }
+}
+
/**
- * tipc_node_xmit() is the general link level function for message sending
+ * tipc_node_xmit() - general link level function for message sending
* @net: the applicable net namespace
* @list: chain of buffers containing message
* @dnode: address of destination node
* @selector: a number used for deterministic link selection
* Consumes the buffer chain.
- * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
+ * Return: 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
*/
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
u32 dnode, int selector)
@@ -1181,26 +1690,46 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
struct tipc_link_entry *le = NULL;
struct tipc_node *n;
struct sk_buff_head xmitq;
+ bool node_up = false;
+ struct net *peer_net;
int bearer_id;
int rc;
if (in_own_node(net, dnode)) {
+ tipc_loopback_trace(net, list);
+ spin_lock_init(&list->lock);
tipc_sk_rcv(net, list);
return 0;
}
n = tipc_node_find(net, dnode);
if (unlikely(!n)) {
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
+ rcu_read_lock();
+ tipc_node_read_lock(n);
+ node_up = node_is_up(n);
+ peer_net = n->peer_net;
+ tipc_node_read_unlock(n);
+ if (node_up && peer_net && check_net(peer_net)) {
+ /* Transmit directly to the peer's Linux container (netns) */
+ tipc_lxc_xmit(peer_net, list);
+ if (likely(skb_queue_empty(list))) {
+ rcu_read_unlock();
+ tipc_node_put(n);
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
tipc_node_read_lock(n);
bearer_id = n->active_links[selector & 1];
if (unlikely(bearer_id == INVALID_BEARER_ID)) {
tipc_node_read_unlock(n);
tipc_node_put(n);
- skb_queue_purge(list);
+ __skb_queue_purge(list);
return -EHOSTUNREACH;
}
@@ -1214,7 +1743,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
if (unlikely(rc == -ENOBUFS))
tipc_node_link_down(n, bearer_id, false);
else
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
tipc_node_put(n);
@@ -1222,7 +1751,7 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
}
/* tipc_node_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
* messages, which will not be rejected
* The only exception is datagram messages rerouted after secondary
* lookup, which are rare and safe to dispose of anyway.
@@ -1232,24 +1761,51 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
{
struct sk_buff_head head;
- skb_queue_head_init(&head);
+ __skb_queue_head_init(&head);
__skb_queue_tail(&head, skb);
tipc_node_xmit(net, &head, dnode, selector);
return 0;
}
-void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
+/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
+ * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
+ */
+int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
+ struct sk_buff *skb;
+ u32 selector, dnode;
+
+ while ((skb = __skb_dequeue(xmitq))) {
+ selector = msg_origport(buf_msg(skb));
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, selector);
+ }
+ return 0;
+}
+
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
+{
+ struct sk_buff_head xmitq;
struct sk_buff *txskb;
struct tipc_node *n;
+ u16 dummy;
u32 dst;
+ /* Use broadcast if all nodes support it */
+ if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
+ __skb_queue_head_init(&xmitq);
+ __skb_queue_tail(&xmitq, skb);
+ tipc_bcast_xmit(net, &xmitq, &dummy);
+ return;
+ }
+
+ /* Otherwise use legacy replicast method */
rcu_read_lock();
list_for_each_entry_rcu(n, tipc_nodes(net), list) {
dst = n->addr;
if (in_own_node(net, dst))
continue;
- if (!tipc_node_is_up(n))
+ if (!node_is_up(n))
continue;
txskb = pskb_copy(skb, GFP_ATOMIC);
if (!txskb)
@@ -1258,7 +1814,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
tipc_node_xmit_skb(net, txskb, dst, 0);
}
rcu_read_unlock();
-
kfree_skb(skb);
}
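[Editor's note] tipc_node_broadcast() now prefers the true broadcast link when the whole cluster supports it, and otherwise falls back to replicast: one pskb_copy() per destination, with the original buffer always consumed. The fallback shape, with a hypothetical unicast helper:

#include <linux/skbuff.h>

static void demo_unicast(struct sk_buff *skb, u32 dest);	/* hypothetical */

static void demo_replicast(struct sk_buff *skb, const u32 *dests, int n)
{
	struct sk_buff *copy;
	int i;

	for (i = 0; i < n; i++) {
		/* Private header per copy; payload pages are shared */
		copy = pskb_copy(skb, GFP_ATOMIC);
		if (!copy)
			break;
		demo_unicast(copy, dests[i]);
	}
	kfree_skb(skb);		/* original is consumed either way */
}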
@@ -1281,10 +1836,10 @@ static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
struct tipc_link *ucl;
int rc;
- rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
+ rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
if (rc & TIPC_LINK_DOWN_EVT) {
- tipc_bearer_reset_all(n->net);
+ tipc_node_reset_links(n);
return;
}
@@ -1346,29 +1901,31 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
}
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
if (!skb_queue_empty(&be->inputq1))
tipc_node_mcast_rcv(n);
- if (rc & TIPC_LINK_DOWN_EVT) {
- /* Reception reassembly failure => reset all links to peer */
- if (!tipc_link_is_up(be->link))
- tipc_node_reset_links(n);
+ /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
+ if (!skb_queue_empty(&n->bc_entry.namedq))
+ tipc_named_rcv(net, &n->bc_entry.namedq,
+ &n->bc_entry.named_rcv_nxt,
+ &n->bc_entry.named_open);
- /* Retransmission failure => reset all links to all peers */
- if (!tipc_link_is_up(tipc_bc_sndlink(net)))
- tipc_bearer_reset_all(net);
- }
+ /* If reassembly or retransmission failure => reset all links to peer */
+ if (rc & TIPC_LINK_DOWN_EVT)
+ tipc_node_reset_links(n);
tipc_node_put(n);
}
/**
* tipc_node_check_state - check and if necessary update node state
+ * @n: target tipc_node
* @skb: TIPC packet
* @bearer_id: identity of bearer delivering the packet
- * Returns true if state is ok, otherwise consumes buffer and returns false
+ * @xmitq: queue for messages to be xmited on
+ * Return: true if state and msg are ok, otherwise false
*/
static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int bearer_id, struct sk_buff_head *xmitq)
@@ -1377,7 +1934,6 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
int usr = msg_user(hdr);
int mtyp = msg_type(hdr);
u16 oseqno = msg_seqno(hdr);
- u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
u16 exp_pkts = msg_msgcnt(hdr);
u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
int state = n->state;
@@ -1385,6 +1941,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
struct tipc_media_addr *maddr;
int pb_id;
+ if (trace_tipc_node_check_state_enabled()) {
+ trace_tipc_skb_dump(skb, false, "skb for node state check");
+ trace_tipc_node_check_state(n, true, " ");
+ }
l = n->links[bearer_id].link;
if (!l)
return false;
@@ -1402,6 +1962,12 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
}
}
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
+ return false;
+ }
+
/* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
if (!tipc_link_is_up(l))
@@ -1428,11 +1994,23 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
/* Initiate or update failover mode if applicable */
if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
syncpt = oseqno + exp_pkts - 1;
- if (pl && tipc_link_is_up(pl)) {
+ if (pl && !tipc_link_is_reset(pl)) {
__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+ trace_tipc_node_link_down(n, true,
+ "node link down <- failover!");
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
+
+ /* If parallel link was already down, and this happened before
+ * the tunnel link came up, node failover was never started.
+ * Ensure that a FAILOVER_MSG is sent to get peer out of
+ * NODE_FAILINGOVER state; this node must also accept
+ * TUNNEL_MSGs from the peer.
+ */
+ if (n->state != NODE_FAILINGOVER)
+ tipc_node_link_failover(n, pl, l, xmitq);
+
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
@@ -1448,17 +2026,18 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
return true;
}
- /* No synching needed if only one link */
+ /* No syncing needed if only one link */
if (!pl || !tipc_link_is_up(pl))
return true;
/* Initiate synch mode if applicable */
if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
- syncpt = iseqno + exp_pkts - 1;
- if (!tipc_link_is_up(l)) {
- tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+ if (n->capabilities & TIPC_TUNNEL_ENHANCED)
+ syncpt = msg_syncpt(hdr);
+ else
+ syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
+ if (!tipc_link_is_up(l))
__tipc_node_link_up(n, bearer_id, xmitq);
- }
if (n->state == SELF_UP_PEER_UP) {
n->sync_point = syncpt;
tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
@@ -1496,7 +2075,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
* tipc_rcv - process TIPC packets/messages arriving from off-node
* @net: the applicable net namespace
* @skb: TIPC packet
- * @bearer: pointer to bearer message arrived on
+ * @b: pointer to bearer message arrived on
*
* Invoked with no locks held. Bearer pointer must point to a valid bearer
* structure (i.e. cannot be NULL), but bearer can be inactive.
@@ -1504,19 +2083,39 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
{
struct sk_buff_head xmitq;
- struct tipc_node *n;
+ struct tipc_link_entry *le;
struct tipc_msg *hdr;
+ struct tipc_node *n;
int bearer_id = b->identity;
- struct tipc_link_entry *le;
u32 self = tipc_own_addr(net);
int usr, rc = 0;
u16 bc_ack;
+#ifdef CONFIG_TIPC_CRYPTO
+ struct tipc_ehdr *ehdr;
+
+ /* Check if message must be decrypted first */
+ if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
+ goto rcv;
+
+ ehdr = (struct tipc_ehdr *)skb->data;
+ if (likely(ehdr->user != LINK_CONFIG)) {
+ n = tipc_node_find(net, ntohl(ehdr->addr));
+ if (unlikely(!n))
+ goto discard;
+ } else {
+ n = tipc_node_find_by_id(net, ehdr->id);
+ }
+ skb_dst_force(skb);
+ tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
+ if (!skb)
+ return;
- __skb_queue_head_init(&xmitq);
-
+rcv:
+#endif
/* Ensure message is well-formed before touching the header */
- if (unlikely(!tipc_msg_validate(skb)))
+ if (unlikely(!tipc_msg_validate(&skb)))
goto discard;
+ __skb_queue_head_init(&xmitq);
hdr = buf_msg(skb);
usr = msg_user(hdr);
bc_ack = msg_bcast_ack(hdr);
@@ -1540,10 +2139,16 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
le = &n->links[bearer_id];
/* Ensure broadcast reception is in synch with peer's send state */
- if (unlikely(usr == LINK_PROTOCOL))
+ if (unlikely(usr == LINK_PROTOCOL)) {
+ if (unlikely(skb_linearize(skb))) {
+ tipc_node_put(n);
+ goto discard;
+ }
+ hdr = buf_msg(skb);
tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
- else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
+ } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
+ }
/* Receive packet directly if conditions permit */
tipc_node_read_lock(n);
@@ -1559,6 +2164,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
/* Check/update node state before receiving */
if (unlikely(skb)) {
+ if (unlikely(skb_linearize(skb)))
+ goto out_node_put;
tipc_node_write_lock(n);
if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
if (le->link) {
@@ -1576,7 +2183,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
tipc_node_link_down(n, bearer_id, false);
if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
- tipc_named_rcv(net, &n->bc_entry.namedq);
+ tipc_named_rcv(net, &n->bc_entry.namedq,
+ &n->bc_entry.named_rcv_nxt,
+ &n->bc_entry.named_open);
if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
tipc_node_mcast_rcv(n);
@@ -1585,37 +2194,86 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
tipc_sk_rcv(net, &le->inputq);
if (!skb_queue_empty(&xmitq))
- tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
+out_node_put:
tipc_node_put(n);
discard:
kfree_skb(skb);
}
+void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
+ int prop)
+{
+ struct tipc_net *tn = tipc_net(net);
+ int bearer_id = b->identity;
+ struct sk_buff_head xmitq;
+ struct tipc_link_entry *e;
+ struct tipc_node *n;
+
+ __skb_queue_head_init(&xmitq);
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ tipc_node_write_lock(n);
+ e = &n->links[bearer_id];
+ if (e->link) {
+ if (prop == TIPC_NLA_PROP_TOL)
+ tipc_link_set_tolerance(e->link, b->tolerance,
+ &xmitq);
+ else if (prop == TIPC_NLA_PROP_MTU)
+ tipc_link_set_mtu(e->link, b->mtu);
+
+ /* Update MTU for node link entry */
+ e->mtu = tipc_link_mss(e->link);
+ }
+
+ tipc_node_write_unlock(n);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
+ }
+
+ rcu_read_unlock();
+}
+
int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
- struct tipc_node *peer;
+ struct tipc_node *peer, *temp_node;
+ u8 node_id[NODE_ID_LEN];
+ u64 *w0 = (u64 *)&node_id[0];
+ u64 *w1 = (u64 *)&node_id[8];
u32 addr;
int err;
- int i;
/* We identify the peer by its net */
if (!info->attrs[TIPC_NLA_NET])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
- info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
- info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
+ info->attrs[TIPC_NLA_NET],
+ tipc_nl_net_policy, info->extack);
if (err)
return err;
- if (!attrs[TIPC_NLA_NET_ADDR])
- return -EINVAL;
+ /* attrs[TIPC_NLA_NET_NODEID] and attrs[TIPC_NLA_NET_ADDR] are
+ * mutually exclusive cases
+ */
+ if (attrs[TIPC_NLA_NET_ADDR]) {
+ addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (!addr)
+ return -EINVAL;
+ }
- addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+ if (attrs[TIPC_NLA_NET_NODEID]) {
+ if (!attrs[TIPC_NLA_NET_NODEID_W1])
+ return -EINVAL;
+ *w0 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID]);
+ *w1 = nla_get_u64(attrs[TIPC_NLA_NET_NODEID_W1]);
+ addr = hash128to32(node_id);
+ }
if (in_own_node(net, addr))
return -ENOTSUPP;
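[Editor's note] The hunk above lets a peer be named either by a 32-bit address or by a 128-bit node id carried in two u64 attributes, treated as mutually exclusive. A minimal nested-attribute parse sketch with a hypothetical attribute set; the real code derives the address by hashing the 128-bit id (hash128to32()), shortened here to a placeholder truncation:

#include <net/netlink.h>

enum { DEMO_A_UNSPEC, DEMO_A_ADDR, DEMO_A_ID, __DEMO_A_MAX };
#define DEMO_A_MAX (__DEMO_A_MAX - 1)

static const struct nla_policy demo_policy[DEMO_A_MAX + 1] = {
	[DEMO_A_ADDR]	= { .type = NLA_U32 },
	[DEMO_A_ID]	= { .type = NLA_U64 },
};

static int demo_parse_peer(struct nlattr *nest,
			   struct netlink_ext_ack *extack, u32 *addr)
{
	struct nlattr *tb[DEMO_A_MAX + 1];
	int err;

	/* "_deprecated" keeps the legacy, non-strict validation mode */
	err = nla_parse_nested_deprecated(tb, DEMO_A_MAX, nest,
					  demo_policy, extack);
	if (err)
		return err;

	if (tb[DEMO_A_ADDR]) {			/* plain 32-bit address */
		*addr = nla_get_u32(tb[DEMO_A_ADDR]);
		return *addr ? 0 : -EINVAL;
	}
	if (tb[DEMO_A_ID]) {			/* placeholder derivation */
		*addr = (u32)nla_get_u64(tb[DEMO_A_ID]);
		return 0;
	}
	return -EINVAL;				/* exactly one is required */
}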
@@ -1635,18 +2293,16 @@ int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- for (i = 0; i < MAX_BEARERS; i++) {
- struct tipc_link_entry *le = &peer->links[i];
-
- if (le->link) {
- kfree(le->link);
- le->link = NULL;
- peer->link_cnt--;
- }
- }
+ tipc_node_clear_links(peer);
tipc_node_write_unlock(peer);
tipc_node_delete(peer);
+ /* Calculate cluster capabilities */
+ tn->capabilities = TIPC_NODE_CAPABILITIES;
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ tn->capabilities &= temp_node->capabilities;
+ }
+ tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
err = 0;
err_out:
tipc_node_put(peer);
@@ -1691,6 +2347,8 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
}
list_for_each_entry_rcu(node, &tn->node_list, list) {
+ if (node->preliminary)
+ continue;
if (last_addr) {
if (node->addr == last_addr)
last_addr = 0;
@@ -1772,9 +2430,9 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
- info->attrs[TIPC_NLA_LINK],
- tipc_nl_link_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
if (err)
return err;
@@ -1801,8 +2459,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
if (attrs[TIPC_NLA_LINK_PROP]) {
struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
- err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
- props);
+ err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
if (err) {
res = err;
goto out;
@@ -1821,22 +2478,26 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
tipc_link_set_prio(link, prio, &xmitq);
}
if (props[TIPC_NLA_PROP_WIN]) {
- u32 win;
+ u32 max_win;
- win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
- tipc_link_set_queue_limits(link, win);
+ max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+ tipc_link_set_queue_limits(link,
+ tipc_link_min_win(link),
+ max_win);
}
}
out:
tipc_node_read_unlock(node);
- tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
+ tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
+ NULL);
return res;
}
int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = genl_info_net(info);
+ struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
struct tipc_nl_msg msg;
char *name;
int err;
@@ -1844,46 +2505,58 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
msg.portid = info->snd_portid;
msg.seq = info->snd_seq;
- if (!info->attrs[TIPC_NLA_LINK_NAME])
+ if (!info->attrs[TIPC_NLA_LINK])
+ return -EINVAL;
+
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
+ if (err)
+ return err;
+
+ if (!attrs[TIPC_NLA_LINK_NAME])
return -EINVAL;
- name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
+
+ name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
if (!msg.skb)
return -ENOMEM;
if (strcmp(name, tipc_bclink_name) == 0) {
- err = tipc_nl_add_bc_link(net, &msg);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
+ if (err)
+ goto err_free;
} else {
int bearer_id;
struct tipc_node *node;
struct tipc_link *link;
node = tipc_node_find_by_name(net, name, &bearer_id);
- if (!node)
- return -EINVAL;
+ if (!node) {
+ err = -EINVAL;
+ goto err_free;
+ }
tipc_node_read_lock(node);
link = node->links[bearer_id].link;
if (!link) {
tipc_node_read_unlock(node);
- nlmsg_free(msg.skb);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_free;
}
err = __tipc_nl_add_link(net, &msg, link, 0);
tipc_node_read_unlock(node);
- if (err) {
- nlmsg_free(msg.skb);
- return err;
- }
+ if (err)
+ goto err_free;
}
return genlmsg_reply(msg.skb, info);
+
+err_free:
+ nlmsg_free(msg.skb);
+ return err;
}
int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
@@ -1895,14 +2568,15 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
struct tipc_node *node;
struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_link_entry *le;
if (!info->attrs[TIPC_NLA_LINK])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
- info->attrs[TIPC_NLA_LINK],
- tipc_nl_link_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
+ info->attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy, info->extack);
if (err)
return err;
@@ -1911,11 +2585,26 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
- if (strcmp(link_name, tipc_bclink_name) == 0) {
- err = tipc_bclink_reset_stats(net);
+ err = -EINVAL;
+ if (!strcmp(link_name, tipc_bclink_name)) {
+ err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
if (err)
return err;
return 0;
+ } else if (strstr(link_name, tipc_bclink_name)) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(node, &tn->node_list, list) {
+ tipc_node_read_lock(node);
+ link = node->bc_entry.link;
+ if (link && !strcmp(link_name, tipc_link_name(link))) {
+ err = tipc_bclink_reset_stats(net, link);
+ tipc_node_read_unlock(node);
+ break;
+ }
+ tipc_node_read_unlock(node);
+ }
+ rcu_read_unlock();
+ return err;
}
node = tipc_node_find_by_name(net, link_name, &bearer_id);
@@ -1939,7 +2628,8 @@ int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
- struct tipc_node *node, u32 *prev_link)
+ struct tipc_node *node, u32 *prev_link,
+ bool bc_link)
{
u32 i;
int err;
@@ -1955,6 +2645,14 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
if (err)
return err;
}
+
+ if (bc_link) {
+ *prev_link = i;
+ err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
+ if (err)
+ return err;
+ }
+
*prev_link = 0;
return 0;
@@ -1963,17 +2661,36 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
+ struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_node *node;
struct tipc_nl_msg msg;
u32 prev_node = cb->args[0];
u32 prev_link = cb->args[1];
int done = cb->args[2];
+ bool bc_link = cb->args[3];
int err;
if (done)
return 0;
+ if (!prev_node) {
+ /* Check if dumping of broadcast-receiver links is needed */
+ if (attrs && attrs[TIPC_NLA_LINK]) {
+ err = nla_parse_nested_deprecated(link,
+ TIPC_NLA_LINK_MAX,
+ attrs[TIPC_NLA_LINK],
+ tipc_nl_link_policy,
+ NULL);
+ if (unlikely(err))
+ return err;
+ if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
+ return -EINVAL;
+ bc_link = true;
+ }
+ }
+
msg.skb = skb;
msg.portid = NETLINK_CB(cb->skb).portid;
msg.seq = cb->nlh->nlmsg_seq;
@@ -1997,7 +2714,7 @@ int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
list) {
tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
- &prev_link);
+ &prev_link, bc_link);
tipc_node_read_unlock(node);
if (err)
goto out;
@@ -2005,14 +2722,14 @@ int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
prev_node = node->addr;
}
} else {
- err = tipc_nl_add_bc_link(net, &msg);
+ err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
if (err)
goto out;
list_for_each_entry_rcu(node, &tn->node_list, list) {
tipc_node_read_lock(node);
err = __tipc_nl_add_node_links(net, &msg, node,
- &prev_link);
+ &prev_link, bc_link);
tipc_node_read_unlock(node);
if (err)
goto out;
@@ -2027,6 +2744,7 @@ out:
cb->args[0] = prev_node;
cb->args[1] = prev_link;
cb->args[2] = done;
+ cb->args[3] = bc_link;
return skb->len;
}
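[Editor's note] The cb->args[] words saved above (previous node, previous link, done flag, and the new bc_link flag) are the dump cursor: netlink invokes the dump callback repeatedly, and whatever is stored there survives between invocations. A minimal cursor sketch; the fill helper and table size are hypothetical:

#include <net/netlink.h>

static int demo_fill_one(struct sk_buff *skb, u32 idx);	/* hypothetical */
static u32 demo_count;					/* hypothetical */

static int demo_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	u32 idx = cb->args[0];	/* resume where the last pass stopped */

	for (; idx < demo_count; idx++) {
		if (demo_fill_one(skb, idx) < 0)
			break;	/* skb full: continue here next time */
	}
	cb->args[0] = idx;
	return skb->len;	/* zero only once everything was dumped */
}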
@@ -2040,9 +2758,10 @@ int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[TIPC_NLA_MON])
return -EINVAL;
- err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
- info->attrs[TIPC_NLA_MON],
- tipc_nl_monitor_policy, info->extack);
+ err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
+ info->attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ info->extack);
if (err)
return err;
@@ -2069,7 +2788,7 @@ static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
if (!hdr)
return -EMSGSIZE;
- attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
+ attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
if (!attrs)
goto msg_full;
@@ -2117,8 +2836,8 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
u32 prev_bearer = cb->args[0];
struct tipc_nl_msg msg;
+ int bearer_id;
int err;
- int i;
if (prev_bearer == MAX_BEARERS)
return 0;
@@ -2128,16 +2847,13 @@ int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
msg.seq = cb->nlh->nlmsg_seq;
rtnl_lock();
- for (i = prev_bearer; i < MAX_BEARERS; i++) {
- prev_bearer = i;
- err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
+ for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
+ err = __tipc_nl_add_monitor(net, &msg, bearer_id);
if (err)
- goto out;
+ break;
}
-
-out:
rtnl_unlock();
- cb->args[0] = prev_bearer;
+ cb->args[0] = bearer_id;
return skb->len;
}
@@ -2153,19 +2869,16 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
int err;
if (!prev_node) {
- struct nlattr **attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_MON])
return -EINVAL;
- err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
- attrs[TIPC_NLA_MON],
- tipc_nl_monitor_policy, NULL);
+ err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
+ attrs[TIPC_NLA_MON],
+ tipc_nl_monitor_policy,
+ NULL);
if (err)
return err;
@@ -2197,3 +2910,256 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
return skb->len;
}
+
+#ifdef CONFIG_TIPC_CRYPTO
+static int tipc_nl_retrieve_key(struct nlattr **attrs,
+ struct tipc_aead_key **pkey)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+ struct tipc_aead_key *key;
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < sizeof(*key))
+ return -EINVAL;
+ key = (struct tipc_aead_key *)nla_data(attr);
+ if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
+ nla_len(attr) < tipc_aead_key_size(key))
+ return -EINVAL;
+
+ *pkey = key;
+ return 0;
+}
+
+static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
+
+ if (!attr)
+ return -ENODATA;
+
+ if (nla_len(attr) < TIPC_NODEID_LEN)
+ return -EINVAL;
+
+ *node_id = (u8 *)nla_data(attr);
+ return 0;
+}
+
+static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
+
+ if (!attr)
+ return -ENODATA;
+
+ *intv = nla_get_u32(attr);
+ return 0;
+}
+
+static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
+ struct net *net = sock_net(skb->sk);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
+ struct tipc_node *n = NULL;
+ struct tipc_aead_key *ukey;
+ bool rekeying = true, master_key = false;
+ u8 *id, *own_id, mode;
+ u32 intv = 0;
+ int rc = 0;
+
+ if (!info->attrs[TIPC_NLA_NODE])
+ return -EINVAL;
+
+ rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
+ info->attrs[TIPC_NLA_NODE],
+ tipc_nl_node_policy, info->extack);
+ if (rc)
+ return rc;
+
+ own_id = tipc_own_id(net);
+ if (!own_id) {
+ GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
+ return -EPERM;
+ }
+
+ rc = tipc_nl_retrieve_rekeying(attrs, &intv);
+ if (rc == -ENODATA)
+ rekeying = false;
+
+ rc = tipc_nl_retrieve_key(attrs, &ukey);
+ if (rc == -ENODATA && rekeying)
+ goto rekeying;
+ else if (rc)
+ return rc;
+
+ rc = tipc_aead_key_validate(ukey, info);
+ if (rc)
+ return rc;
+
+ rc = tipc_nl_retrieve_nodeid(attrs, &id);
+ switch (rc) {
+ case -ENODATA:
+ mode = CLUSTER_KEY;
+ master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
+ break;
+ case 0:
+ mode = PER_NODE_KEY;
+ if (memcmp(id, own_id, NODE_ID_LEN)) {
+ n = tipc_node_find_by_id(net, id) ?:
+ tipc_node_create(net, 0, id, 0xffffu, 0, true);
+ if (unlikely(!n))
+ return -ENOMEM;
+ c = n->crypto_rx;
+ }
+ break;
+ default:
+ return rc;
+ }
+
+ /* Initiate the TX/RX key */
+ rc = tipc_crypto_key_init(c, ukey, mode, master_key);
+ if (n)
+ tipc_node_put(n);
+
+ if (unlikely(rc < 0)) {
+ GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
+ return rc;
+ } else if (c == tx) {
+ /* Distribute TX key but not master one */
+ if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
+ GENL_SET_ERR_MSG(info, "failed to replicate new key");
+rekeying:
+ /* Schedule TX rekeying if needed */
+ tipc_crypto_rekeying_sched(tx, rekeying, intv);
+ }
+
+ return 0;
+}
+
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_set_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+
+static int __tipc_nl_node_flush_key(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tipc_net *tn = tipc_net(net);
+ struct tipc_node *n;
+
+ tipc_crypto_key_flush(tn->crypto_tx);
+ rcu_read_lock();
+ list_for_each_entry_rcu(n, &tn->node_list, list)
+ tipc_crypto_key_flush(n->crypto_rx);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tipc_nl_node_flush_key(skb, info);
+ rtnl_unlock();
+
+ return err;
+}
+#endif
+
+/**
+ * tipc_node_dump - dump TIPC node data
+ * @n: tipc node to be dumped
+ * @more: dump more?
+ * - false: dump only tipc node data
+ * - true: dump node link data as well
+ * @buf: returned buffer of dump data in string format
+ */
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
+
+ if (!n) {
+ i += scnprintf(buf, sz, "node data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "node data: %x", n->addr);
+ i += scnprintf(buf + i, sz - i, " %x", n->state);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
+ i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
+ i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
+ i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
+ i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", n->working_links);
+ i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
+ i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
+
+ if (!more)
+ return i;
+
+ i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "bclink:\n ");
+ i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
+
+ return i;
+}
+
+void tipc_node_pre_cleanup_net(struct net *exit_net)
+{
+ struct tipc_node *n;
+ struct tipc_net *tn;
+ struct net *tmp;
+
+ rcu_read_lock();
+ for_each_net_rcu(tmp) {
+ if (tmp == exit_net)
+ continue;
+ tn = tipc_net(tmp);
+ if (!tn)
+ continue;
+ spin_lock_bh(&tn->node_list_lock);
+ list_for_each_entry_rcu(n, &tn->node_list, list) {
+ if (!n->peer_net)
+ continue;
+ if (n->peer_net != exit_net)
+ continue;
+ tipc_node_write_lock(n);
+ n->peer_net = NULL;
+ n->peer_hash_mix = 0;
+ tipc_node_write_unlock_fast(n);
+ break;
+ }
+ spin_unlock_bh(&tn->node_list_lock);
+ }
+ rcu_read_unlock();
+}
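The TIPC_NLA_NODE_KEY attribute consumed by tipc_nl_retrieve_key() above is a
variable-length blob: a fixed struct tipc_aead_key header followed by keylen
bytes of key material, so the length is validated twice, once for the header
and once for the full payload. A minimal sketch of the same bounds checks,
assuming the struct layout and the TIPC_AEAD_KEYLEN_MAX limit come from
include/uapi/linux/tipc.h as in mainline:

#include <linux/tipc.h>	/* struct tipc_aead_key, TIPC_AEAD_KEYLEN_MAX */
#include <stddef.h>

/* Returns nonzero when the attribute payload is self-consistent:
 * header present, keylen within the AEAD limit, and enough bytes of
 * key material actually following the header.
 */
static int key_attr_valid(const struct tipc_aead_key *key, size_t payload_len)
{
	if (payload_len < sizeof(*key))
		return 0;
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX)
		return 0;
	if (payload_len < sizeof(*key) + key->keylen)
		return 0;
	return 1;
}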
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 898c22916984..154a5bbb0d29 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -45,37 +45,71 @@
/* Optional capabilities supported by this code version
*/
enum {
+ TIPC_SYN_BIT = (1),
TIPC_BCAST_SYNCH = (1 << 1),
TIPC_BCAST_STATE_NACK = (1 << 2),
TIPC_BLOCK_FLOWCTL = (1 << 3),
- TIPC_BCAST_RCAST = (1 << 4)
+ TIPC_BCAST_RCAST = (1 << 4),
+ TIPC_NODE_ID128 = (1 << 5),
+ TIPC_LINK_PROTO_SEQNO = (1 << 6),
+ TIPC_MCAST_RBCTL = (1 << 7),
+ TIPC_GAP_ACK_BLOCK = (1 << 8),
+ TIPC_TUNNEL_ENHANCED = (1 << 9),
+ TIPC_NAGLE = (1 << 10),
+ TIPC_NAMED_BCAST = (1 << 11)
};
-#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
- TIPC_BCAST_STATE_NACK | \
- TIPC_BCAST_RCAST | \
- TIPC_BLOCK_FLOWCTL)
+#define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \
+ TIPC_BCAST_SYNCH | \
+ TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
+ TIPC_BLOCK_FLOWCTL | \
+ TIPC_NODE_ID128 | \
+ TIPC_LINK_PROTO_SEQNO | \
+ TIPC_MCAST_RBCTL | \
+ TIPC_GAP_ACK_BLOCK | \
+ TIPC_TUNNEL_ENHANCED | \
+ TIPC_NAGLE | \
+ TIPC_NAMED_BCAST)
+
#define INVALID_BEARER_ID -1
void tipc_node_stop(struct net *net);
-void tipc_node_check_dest(struct net *net, u32 onode,
+bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
+u32 tipc_node_get_addr(struct tipc_node *node);
+char *tipc_node_get_id_str(struct tipc_node *node);
+void tipc_node_put(struct tipc_node *node);
+void tipc_node_get(struct tipc_node *node);
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
+ u16 capabilities, u32 hash_mixes,
+ bool preliminary);
+#ifdef CONFIG_TIPC_CRYPTO
+struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
+struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr);
+#endif
+u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
+void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
struct tipc_bearer *bearer,
- u16 capabilities, u32 signature,
+ u16 capabilities, u32 signature, u32 hash_mixes,
struct tipc_media_addr *maddr,
bool *respond, bool *dupl_addr);
void tipc_node_delete_links(struct net *net, int bearer_id);
+void tipc_node_apply_property(struct net *net, struct tipc_bearer *b, int prop);
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
char *linkname, size_t len);
int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
int selector);
+int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *list);
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
u32 selector);
void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr);
void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
-void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests);
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel);
+int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
+bool tipc_node_is_up(struct net *net, u32 addr);
u16 tipc_node_get_capabilities(struct net *net, u32 addr);
int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
@@ -89,4 +123,9 @@ int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
struct netlink_callback *cb);
+#ifdef CONFIG_TIPC_CRYPTO
+int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info);
+#endif
+void tipc_node_pre_cleanup_net(struct net *exit_net);
#endif
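Each flag in the capability enum above is advertised to peers during neighbor
discovery, and TIPC_NODE_CAPABILITIES is simply the OR of everything this code
version supports; a feature is used only when the stored peer mask has the
matching bit set (see the TIPC_NAGLE test in tsk_set_nagle() in socket.c
below). A hypothetical helper illustrating the pattern — the name is
illustrative, not part of the kernel API:

/* Gate an optional feature on the capability mask the peer
 * advertised at discovery time.
 */
static inline bool tipc_peer_has_cap(u16 peer_caps, u16 cap_bit)
{
	return (peer_caps & cap_bit) != 0;
}

/* e.g. only enable Nagle batching when the peer understands it:
 *	if (tipc_peer_has_cap(tsk->peer_caps, TIPC_NAGLE))
 *		...
 */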
diff --git a/net/tipc/server.c b/net/tipc/server.c
deleted file mode 100644
index 3cd6402e812c..000000000000
--- a/net/tipc/server.c
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * net/tipc/server.c: TIPC server infrastructure
- *
- * Copyright (c) 2012-2013, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "server.h"
-#include "core.h"
-#include "socket.h"
-#include <net/sock.h>
-#include <linux/module.h>
-
-/* Number of messages to send before rescheduling */
-#define MAX_SEND_MSG_COUNT 25
-#define MAX_RECV_MSG_COUNT 25
-#define CF_CONNECTED 1
-#define CF_SERVER 2
-
-#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)
-
-/**
- * struct tipc_conn - TIPC connection structure
- * @kref: reference counter to connection object
- * @conid: connection identifier
- * @sock: socket handler associated with connection
- * @flags: indicates connection state
- * @server: pointer to connected server
- * @rwork: receive work item
- * @usr_data: user-specified field
- * @rx_action: what to do when connection socket is active
- * @outqueue: pointer to first outbound message in queue
- * @outqueue_lock: control access to the outqueue
- * @outqueue: list of connection objects for its server
- * @swork: send work item
- */
-struct tipc_conn {
- struct kref kref;
- int conid;
- struct socket *sock;
- unsigned long flags;
- struct tipc_server *server;
- struct work_struct rwork;
- int (*rx_action) (struct tipc_conn *con);
- void *usr_data;
- struct list_head outqueue;
- spinlock_t outqueue_lock;
- struct work_struct swork;
-};
-
-/* An entry waiting to be sent */
-struct outqueue_entry {
- struct list_head list;
- struct kvec iov;
- struct sockaddr_tipc dest;
-};
-
-static void tipc_recv_work(struct work_struct *work);
-static void tipc_send_work(struct work_struct *work);
-static void tipc_clean_outqueues(struct tipc_conn *con);
-
-static void tipc_conn_kref_release(struct kref *kref)
-{
- struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct tipc_server *s = con->server;
- struct sockaddr_tipc *saddr = s->saddr;
- struct socket *sock = con->sock;
- struct sock *sk;
-
- if (sock) {
- sk = sock->sk;
- if (test_bit(CF_SERVER, &con->flags)) {
- __module_get(sock->ops->owner);
- __module_get(sk->sk_prot_creator->owner);
- }
- saddr->scope = -TIPC_NODE_SCOPE;
- kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
- sock_release(sock);
- con->sock = NULL;
-
- spin_lock_bh(&s->idr_lock);
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- spin_unlock_bh(&s->idr_lock);
- }
-
- tipc_clean_outqueues(con);
- kfree(con);
-}
-
-static void conn_put(struct tipc_conn *con)
-{
- kref_put(&con->kref, tipc_conn_kref_release);
-}
-
-static void conn_get(struct tipc_conn *con)
-{
- kref_get(&con->kref);
-}
-
-static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
-{
- struct tipc_conn *con;
-
- spin_lock_bh(&s->idr_lock);
- con = idr_find(&s->conn_idr, conid);
- if (con && test_bit(CF_CONNECTED, &con->flags))
- conn_get(con);
- else
- con = NULL;
- spin_unlock_bh(&s->idr_lock);
- return con;
-}
-
-static void sock_data_ready(struct sock *sk)
-{
- struct tipc_conn *con;
-
- read_lock_bh(&sk->sk_callback_lock);
- con = sock2con(sk);
- if (con && test_bit(CF_CONNECTED, &con->flags)) {
- conn_get(con);
- if (!queue_work(con->server->rcv_wq, &con->rwork))
- conn_put(con);
- }
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void sock_write_space(struct sock *sk)
-{
- struct tipc_conn *con;
-
- read_lock_bh(&sk->sk_callback_lock);
- con = sock2con(sk);
- if (con && test_bit(CF_CONNECTED, &con->flags)) {
- conn_get(con);
- if (!queue_work(con->server->send_wq, &con->swork))
- conn_put(con);
- }
- read_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
-{
- struct sock *sk = sock->sk;
-
- write_lock_bh(&sk->sk_callback_lock);
-
- sk->sk_data_ready = sock_data_ready;
- sk->sk_write_space = sock_write_space;
- sk->sk_user_data = con;
-
- con->sock = sock;
-
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void tipc_unregister_callbacks(struct tipc_conn *con)
-{
- struct sock *sk = con->sock->sk;
-
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_user_data = NULL;
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static void tipc_close_conn(struct tipc_conn *con)
-{
- struct tipc_server *s = con->server;
-
- if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
- tipc_unregister_callbacks(con);
-
- if (con->conid)
- s->tipc_conn_release(con->conid, con->usr_data);
-
- /* We shouldn't flush pending works as we may be in the
- * thread. In fact the races with pending rx/tx work structs
- * are harmless for us here as we have already deleted this
- * connection from server connection list.
- */
- kernel_sock_shutdown(con->sock, SHUT_RDWR);
-
- conn_put(con);
- }
-}
-
-static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
-{
- struct tipc_conn *con;
- int ret;
-
- con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
- if (!con)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&con->kref);
- INIT_LIST_HEAD(&con->outqueue);
- spin_lock_init(&con->outqueue_lock);
- INIT_WORK(&con->swork, tipc_send_work);
- INIT_WORK(&con->rwork, tipc_recv_work);
-
- spin_lock_bh(&s->idr_lock);
- ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
- if (ret < 0) {
- kfree(con);
- spin_unlock_bh(&s->idr_lock);
- return ERR_PTR(-ENOMEM);
- }
- con->conid = ret;
- s->idr_in_use++;
- spin_unlock_bh(&s->idr_lock);
-
- set_bit(CF_CONNECTED, &con->flags);
- con->server = s;
-
- return con;
-}
-
-static int tipc_receive_from_sock(struct tipc_conn *con)
-{
- struct msghdr msg = {};
- struct tipc_server *s = con->server;
- struct sockaddr_tipc addr;
- struct kvec iov;
- void *buf;
- int ret;
-
- buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
- if (!buf) {
- ret = -ENOMEM;
- goto out_close;
- }
-
- iov.iov_base = buf;
- iov.iov_len = s->max_rcvbuf_size;
- msg.msg_name = &addr;
- ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
- MSG_DONTWAIT);
- if (ret <= 0) {
- kmem_cache_free(s->rcvbuf_cache, buf);
- goto out_close;
- }
-
- s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
- con->usr_data, buf, ret);
-
- kmem_cache_free(s->rcvbuf_cache, buf);
-
- return 0;
-
-out_close:
- if (ret != -EWOULDBLOCK)
- tipc_close_conn(con);
- else if (ret == 0)
- /* Don't return success if we really got EOF */
- ret = -EAGAIN;
-
- return ret;
-}
-
-static int tipc_accept_from_sock(struct tipc_conn *con)
-{
- struct tipc_server *s = con->server;
- struct socket *sock = con->sock;
- struct socket *newsock;
- struct tipc_conn *newcon;
- int ret;
-
- ret = kernel_accept(sock, &newsock, O_NONBLOCK);
- if (ret < 0)
- return ret;
-
- newcon = tipc_alloc_conn(con->server);
- if (IS_ERR(newcon)) {
- ret = PTR_ERR(newcon);
- sock_release(newsock);
- return ret;
- }
-
- newcon->rx_action = tipc_receive_from_sock;
- tipc_register_callbacks(newsock, newcon);
-
- /* Notify that new connection is incoming */
- newcon->usr_data = s->tipc_conn_new(newcon->conid);
- if (!newcon->usr_data) {
- sock_release(newsock);
- return -ENOMEM;
- }
-
- /* Wake up receive process in case of 'SYN+' message */
- newsock->sk->sk_data_ready(newsock->sk);
- return ret;
-}
-
-static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
-{
- struct tipc_server *s = con->server;
- struct socket *sock = NULL;
- int ret;
-
- ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
- if (ret < 0)
- return NULL;
- ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
- (char *)&s->imp, sizeof(s->imp));
- if (ret < 0)
- goto create_err;
- ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
- if (ret < 0)
- goto create_err;
-
- switch (s->type) {
- case SOCK_STREAM:
- case SOCK_SEQPACKET:
- con->rx_action = tipc_accept_from_sock;
-
- ret = kernel_listen(sock, 0);
- if (ret < 0)
- goto create_err;
- break;
- case SOCK_DGRAM:
- case SOCK_RDM:
- con->rx_action = tipc_receive_from_sock;
- break;
- default:
- pr_err("Unknown socket type %d\n", s->type);
- goto create_err;
- }
-
- /* As server's listening socket owner and creator is the same module,
- * we have to decrease TIPC module reference count to guarantee that
- * it remains zero after the server socket is created, otherwise,
- * executing "rmmod" command is unable to make TIPC module deleted
- * after TIPC module is inserted successfully.
- *
- * However, the reference count is ever increased twice in
- * sock_create_kern(): one is to increase the reference count of owner
- * of TIPC socket's proto_ops struct; another is to increment the
- * reference count of owner of TIPC proto struct. Therefore, we must
- * decrement the module reference count twice to ensure that it keeps
- * zero after server's listening socket is created. Of course, we
- * must bump the module reference count twice as well before the socket
- * is closed.
- */
- module_put(sock->ops->owner);
- module_put(sock->sk->sk_prot_creator->owner);
- set_bit(CF_SERVER, &con->flags);
-
- return sock;
-
-create_err:
- kernel_sock_shutdown(sock, SHUT_RDWR);
- sock_release(sock);
- return NULL;
-}
-
-static int tipc_open_listening_sock(struct tipc_server *s)
-{
- struct socket *sock;
- struct tipc_conn *con;
-
- con = tipc_alloc_conn(s);
- if (IS_ERR(con))
- return PTR_ERR(con);
-
- sock = tipc_create_listen_sock(con);
- if (!sock) {
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- kfree(con);
- return -EINVAL;
- }
-
- tipc_register_callbacks(sock, con);
- return 0;
-}
-
-static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
-{
- struct outqueue_entry *entry;
- void *buf;
-
- entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
- if (!entry)
- return NULL;
-
- buf = kmemdup(data, len, GFP_ATOMIC);
- if (!buf) {
- kfree(entry);
- return NULL;
- }
-
- entry->iov.iov_base = buf;
- entry->iov.iov_len = len;
-
- return entry;
-}
-
-static void tipc_free_entry(struct outqueue_entry *e)
-{
- kfree(e->iov.iov_base);
- kfree(e);
-}
-
-static void tipc_clean_outqueues(struct tipc_conn *con)
-{
- struct outqueue_entry *e, *safe;
-
- spin_lock_bh(&con->outqueue_lock);
- list_for_each_entry_safe(e, safe, &con->outqueue, list) {
- list_del(&e->list);
- tipc_free_entry(e);
- }
- spin_unlock_bh(&con->outqueue_lock);
-}
-
-int tipc_conn_sendmsg(struct tipc_server *s, int conid,
- struct sockaddr_tipc *addr, void *data, size_t len)
-{
- struct outqueue_entry *e;
- struct tipc_conn *con;
-
- con = tipc_conn_lookup(s, conid);
- if (!con)
- return -EINVAL;
-
- if (!test_bit(CF_CONNECTED, &con->flags)) {
- conn_put(con);
- return 0;
- }
-
- e = tipc_alloc_entry(data, len);
- if (!e) {
- conn_put(con);
- return -ENOMEM;
- }
-
- if (addr)
- memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));
-
- spin_lock_bh(&con->outqueue_lock);
- list_add_tail(&e->list, &con->outqueue);
- spin_unlock_bh(&con->outqueue_lock);
-
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- return 0;
-}
-
-void tipc_conn_terminate(struct tipc_server *s, int conid)
-{
- struct tipc_conn *con;
-
- con = tipc_conn_lookup(s, conid);
- if (con) {
- tipc_close_conn(con);
- conn_put(con);
- }
-}
-
-static void tipc_send_to_sock(struct tipc_conn *con)
-{
- int count = 0;
- struct tipc_server *s = con->server;
- struct outqueue_entry *e;
- struct msghdr msg;
- int ret;
-
- spin_lock_bh(&con->outqueue_lock);
- while (test_bit(CF_CONNECTED, &con->flags)) {
- e = list_entry(con->outqueue.next, struct outqueue_entry,
- list);
- if ((struct list_head *) e == &con->outqueue)
- break;
- spin_unlock_bh(&con->outqueue_lock);
-
- memset(&msg, 0, sizeof(msg));
- msg.msg_flags = MSG_DONTWAIT;
-
- if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
- msg.msg_name = &e->dest;
- msg.msg_namelen = sizeof(struct sockaddr_tipc);
- }
- ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
- e->iov.iov_len);
- if (ret == -EWOULDBLOCK || ret == 0) {
- cond_resched();
- goto out;
- } else if (ret < 0) {
- goto send_err;
- }
-
- /* Don't starve users filling buffers */
- if (++count >= MAX_SEND_MSG_COUNT) {
- cond_resched();
- count = 0;
- }
-
- spin_lock_bh(&con->outqueue_lock);
- list_del(&e->list);
- tipc_free_entry(e);
- }
- spin_unlock_bh(&con->outqueue_lock);
-out:
- return;
-
-send_err:
- tipc_close_conn(con);
-}
-
-static void tipc_recv_work(struct work_struct *work)
-{
- struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
- int count = 0;
-
- while (test_bit(CF_CONNECTED, &con->flags)) {
- if (con->rx_action(con))
- break;
-
- /* Don't flood Rx machine */
- if (++count >= MAX_RECV_MSG_COUNT) {
- cond_resched();
- count = 0;
- }
- }
- conn_put(con);
-}
-
-static void tipc_send_work(struct work_struct *work)
-{
- struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
-
- if (test_bit(CF_CONNECTED, &con->flags))
- tipc_send_to_sock(con);
-
- conn_put(con);
-}
-
-static void tipc_work_stop(struct tipc_server *s)
-{
- destroy_workqueue(s->rcv_wq);
- destroy_workqueue(s->send_wq);
-}
-
-static int tipc_work_start(struct tipc_server *s)
-{
- s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
- if (!s->rcv_wq) {
- pr_err("can't start tipc receive workqueue\n");
- return -ENOMEM;
- }
-
- s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
- if (!s->send_wq) {
- pr_err("can't start tipc send workqueue\n");
- destroy_workqueue(s->rcv_wq);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-int tipc_server_start(struct tipc_server *s)
-{
- int ret;
-
- spin_lock_init(&s->idr_lock);
- idr_init(&s->conn_idr);
- s->idr_in_use = 0;
-
- s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!s->rcvbuf_cache)
- return -ENOMEM;
-
- ret = tipc_work_start(s);
- if (ret < 0) {
- kmem_cache_destroy(s->rcvbuf_cache);
- return ret;
- }
- ret = tipc_open_listening_sock(s);
- if (ret < 0) {
- tipc_work_stop(s);
- kmem_cache_destroy(s->rcvbuf_cache);
- return ret;
- }
- return ret;
-}
-
-void tipc_server_stop(struct tipc_server *s)
-{
- struct tipc_conn *con;
- int id;
-
- spin_lock_bh(&s->idr_lock);
- for (id = 0; s->idr_in_use; id++) {
- con = idr_find(&s->conn_idr, id);
- if (con) {
- spin_unlock_bh(&s->idr_lock);
- tipc_close_conn(con);
- spin_lock_bh(&s->idr_lock);
- }
- }
- spin_unlock_bh(&s->idr_lock);
-
- tipc_work_stop(s);
- kmem_cache_destroy(s->rcvbuf_cache);
- idr_destroy(&s->conn_idr);
-}
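A detail worth keeping in mind about the file removed above: every queued
rx/tx work item held its own reference on the connection — conn_get() before
queue_work(), conn_put() either when the work ran or immediately if queueing
failed — so a connection could not be freed while work was still in flight.
A stripped-down sketch of that pattern, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct conn {
	struct kref kref;
	struct work_struct rwork;
};

static void conn_release(struct kref *kref)
{
	kfree(container_of(kref, struct conn, kref));
}

static void conn_rx_work(struct work_struct *work)
{
	struct conn *con = container_of(work, struct conn, rwork);

	/* ... receive from the socket ... */
	kref_put(&con->kref, conn_release);	/* drop the queue's ref */
}

/* Take a reference for the work item up front; drop it again if the
 * item was already queued, since no new execution was scheduled.
 */
static void conn_schedule_rx(struct workqueue_struct *wq, struct conn *con)
{
	kref_get(&con->kref);
	if (!queue_work(wq, &con->rwork))
		kref_put(&con->kref, conn_release);
}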
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 101e3597338f..817b07d95a91 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,8 +1,9 @@
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +37,7 @@
#include <linux/rhashtable.h>
#include <linux/sched/signal.h>
+#include <trace/events/sock.h>
#include "core.h"
#include "name_table.h"
@@ -45,13 +47,16 @@
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
+#include "group.h"
+#include "trace.h"
-#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
-#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
-#define TIPC_FWD_MSG 1
+#define NAGLE_START_INIT 4
+#define NAGLE_START_MAX 1024
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
#define TIPC_MAX_PORT 0xffffffff
#define TIPC_MIN_PORT 1
-#define TIPC_ACK_RATE 4 /* ACK at 1/4 of of rcv window size */
+#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
enum {
TIPC_LISTEN = TCP_LISTEN,
@@ -61,43 +66,57 @@ enum {
TIPC_CONNECTING = TCP_SYN_SENT,
};
+struct sockaddr_pair {
+ struct sockaddr_tipc sock;
+ struct sockaddr_tipc member;
+};
+
/**
* struct tipc_sock - TIPC socket structure
* @sk: socket - interacts with 'port' and with user via the socket API
- * @conn_type: TIPC type used when connection was established
- * @conn_instance: TIPC instance used when connection was established
- * @published: non-zero if port has one or more associated names
* @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @maxnagle: maximum size of msg which can be subject to nagle
* @portid: unique port identity in TIPC socket hash table
* @phdr: preformatted message header used when sending messages
- * #cong_links: list of congested links
+ * @cong_links: list of congested links
* @publications: list of publications for port
- * @blocking_link: address of the congested link we are currently sleeping on
* @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
* @conn_timeout: the time we can wait for an unresponded setup request
+ * @probe_unacked: probe has not received ack yet
* @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
* @cong_link_cnt: number of congested links
- * @sent_unacked: # messages sent by socket, and not yet acked by peer
+ * @snt_unacked: # messages sent by socket, and not yet acked by peer
+ * @snd_win: send window size
+ * @peer_caps: peer capabilities mask
* @rcv_unacked: # messages read by user, but not yet acked back to peer
+ * @rcv_win: receive window size
* @peer: 'connected' peer for dgram/rdm
* @node: hash table node
* @mc_method: cookie for use between socket and broadcast layer
* @rcu: rcu struct for tipc_sock
+ * @group: TIPC communications group
+ * @oneway: message count in one direction (FIXME)
+ * @nagle_start: current nagle value
+ * @snd_backlog: send backlog count
+ * @msg_acc: messages accepted; used in managing backlog and nagle
+ * @pkt_cnt: TIPC socket packet count
+ * @expect_ack: whether this TIPC socket is expecting an ack
+ * @nodelay: setsockopt() TIPC_NODELAY setting
+ * @group_is_open: TIPC socket group is fully open (FIXME)
+ * @published: true if port has one or more associated names
+ * @conn_addrtype: address type used when establishing connection
*/
struct tipc_sock {
struct sock sk;
- u32 conn_type;
- u32 conn_instance;
- int published;
u32 max_pkt;
+ u32 maxnagle;
u32 portid;
struct tipc_msg phdr;
struct list_head cong_links;
struct list_head publications;
u32 pub_count;
- uint conn_timeout;
atomic_t dupl_rcvcnt;
+ u16 conn_timeout;
bool probe_unacked;
u16 cong_link_cnt;
u16 snt_unacked;
@@ -109,25 +128,35 @@ struct tipc_sock {
struct rhash_head node;
struct tipc_mc_method mc_method;
struct rcu_head rcu;
+ struct tipc_group *group;
+ u32 oneway;
+ u32 nagle_start;
+ u16 snd_backlog;
+ u16 msg_acc;
+ u16 pkt_cnt;
+ bool expect_ack;
+ bool nodelay;
+ bool group_is_open;
+ bool published;
+ u8 conn_addrtype;
};
-static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
- bool kern);
-static void tipc_sk_timeout(unsigned long data);
-static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq);
-static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq);
+static void tipc_sk_timeout(struct timer_list *t);
+static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
+static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
+static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
+static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
+static int tipc_wait_for_connect(struct socket *sock, long *timeo_p);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -175,22 +204,27 @@ static int tsk_importance(struct tipc_sock *tsk)
return msg_importance(&tsk->phdr);
}
-static int tsk_set_importance(struct tipc_sock *tsk, int imp)
+static struct tipc_sock *tipc_sk(const struct sock *sk)
+{
+ return container_of(sk, struct tipc_sock, sk);
+}
+
+int tsk_set_importance(struct sock *sk, int imp)
{
if (imp > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
- msg_set_importance(&tsk->phdr, (u32)imp);
+ msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
return 0;
}
-static struct tipc_sock *tipc_sk(const struct sock *sk)
+static bool tsk_conn_cong(struct tipc_sock *tsk)
{
- return container_of(sk, struct tipc_sock, sk);
+ return tsk->snt_unacked > tsk->snd_win;
}
-static bool tsk_conn_cong(struct tipc_sock *tsk)
+static u16 tsk_blocks(int len)
{
- return tsk->snt_unacked > tsk->snd_win;
+ return ((len / FLOWCTL_BLK_SZ) + 1);
}
/* tsk_blocks(): translate a buffer size in bytes to number of
@@ -213,13 +247,35 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
return 1;
}
+/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
+ */
+static void tsk_set_nagle(struct tipc_sock *tsk)
+{
+ struct sock *sk = &tsk->sk;
+
+ tsk->maxnagle = 0;
+ if (sk->sk_type != SOCK_STREAM)
+ return;
+ if (tsk->nodelay)
+ return;
+ if (!(tsk->peer_caps & TIPC_NAGLE))
+ return;
+ /* Limit node local buffer size to avoid receive queue overflow */
+ if (tsk->max_pkt == MAX_MSG_SIZE)
+ tsk->maxnagle = 1500;
+ else
+ tsk->maxnagle = tsk->max_pkt;
+}
+
/**
* tsk_advance_rx_queue - discard first buffer in socket receive queue
+ * @sk: network socket
*
* Caller must hold socket lock
*/
static void tsk_advance_rx_queue(struct sock *sk)
{
+ trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
@@ -234,6 +290,7 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
if (!tipc_msg_reverse(onode, &skb, err))
return;
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
dnode = msg_destnode(buf_msg(skb));
selector = msg_origport(buf_msg(skb));
tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
@@ -241,20 +298,22 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
/**
* tsk_rej_rx_queue - reject all buffers in socket receive queue
+ * @sk: network socket
+ * @error: response error code
*
* Caller must hold socket lock
*/
-static void tsk_rej_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk, int error)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
- tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
+ tipc_sk_respond(sk, skb, error);
}
-static bool tipc_sk_connected(struct sock *sk)
+static bool tipc_sk_connected(const struct sock *sk)
{
- return sk->sk_state == TIPC_ESTABLISHED;
+ return READ_ONCE(sk->sk_state) == TIPC_ESTABLISHED;
}
/* tipc_sk_type_connectionless - check if the socket is datagram socket
@@ -275,10 +334,9 @@ static bool tipc_sk_type_connectionless(struct sock *sk)
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
struct sock *sk = &tsk->sk;
- struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
+ u32 self = tipc_own_addr(sock_net(sk));
u32 peer_port = tsk_peer_port(tsk);
- u32 orig_node;
- u32 peer_node;
+ u32 orig_node, peer_node;
if (unlikely(!tipc_sk_connected(sk)))
return false;
@@ -292,10 +350,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
if (likely(orig_node == peer_node))
return true;
- if (!orig_node && (peer_node == tn->own_addr))
+ if (!orig_node && peer_node == self)
return true;
- if (!peer_node && (orig_node == tn->own_addr))
+ if (!peer_node && orig_node == self)
return true;
return false;
@@ -364,16 +422,18 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
+ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
- DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+ smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
break; \
- prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
+ add_wait_queue(sk_sleep(sk_), &wait_); \
release_sock(sk_); \
*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
sched_annotate_sleep(); \
@@ -393,12 +453,11 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
* This routine creates additional data structures used by the TIPC socket,
* initializes them, and links them together.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
- struct tipc_net *tn;
const struct proto_ops *ops;
struct sock *sk;
struct tipc_sock *tsk;
@@ -430,16 +489,18 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
tsk = tipc_sk(sk);
tsk->max_pkt = MAX_PKT_DEFAULT;
+ tsk->maxnagle = 0;
+ tsk->nagle_start = NAGLE_START_INIT;
INIT_LIST_HEAD(&tsk->publications);
INIT_LIST_HEAD(&tsk->cong_links);
msg = &tsk->phdr;
- tn = net_generic(sock_net(sk), tipc_net_id);
/* Finish initializing socket data structures */
sock->ops = ops;
sock_init_data(sock, sk);
tipc_set_sk_state(sk, TIPC_OPEN);
if (tipc_sk_insert(tsk)) {
+ sk_free(sk);
pr_warn("Socket create failed; port number exhausted\n");
return -EINVAL;
}
@@ -447,18 +508,19 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
/* Ensure tsk is visible before we read own_addr. */
smp_mb();
- tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
- NAMED_H_SIZE, 0);
+ tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
+ TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
msg_set_origport(msg, tsk->portid);
- setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
+ timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
sk->sk_shutdown = 0;
- sk->sk_backlog_rcv = tipc_backlog_rcv;
- sk->sk_rcvbuf = sysctl_tipc_rmem[1];
+ sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
+ sk->sk_rcvbuf = READ_ONCE(sysctl_tipc_rmem[1]);
sk->sk_data_ready = tipc_data_ready;
sk->sk_write_space = tipc_write_space;
sk->sk_destruct = tipc_sock_destruct;
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+ tsk->group_is_open = true;
atomic_set(&tsk->dupl_rcvcnt, 0);
/* Start out with safe limits until we receive an advertised window */
@@ -470,7 +532,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
if (sock->type == SOCK_DGRAM)
tsk_set_unreliable(tsk, true);
}
-
+ __skb_queue_head_init(&tsk->mc_method.deferredq);
+ trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
return 0;
}
@@ -487,7 +550,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
- long timeout = CONN_TIMEOUT_DEFAULT;
+ long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
u32 dnode = tsk_peer_node(tsk);
struct sk_buff *skb;
@@ -495,34 +558,50 @@ static void __tipc_shutdown(struct socket *sock, int error)
tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
!tsk_conn_cong(tsk)));
- /* Reject all unreceived messages, except on an active connection
- * (which disconnects locally & sends a 'FIN+' to peer).
- */
- while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- if (TIPC_SKB_CB(skb)->bytes_read) {
- kfree_skb(skb);
- continue;
- }
- if (!tipc_sk_type_connectionless(sk) &&
- sk->sk_state != TIPC_DISCONNECTING) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- }
- tipc_sk_respond(sk, skb, error);
+ /* Push out delayed messages if in Nagle mode */
+ tipc_sk_push_backlog(tsk, false);
+ /* Remove pending SYN */
+ __skb_queue_purge(&sk->sk_write_queue);
+
+ /* Remove partially received buffer if any */
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb && TIPC_SKB_CB(skb)->bytes_read) {
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
}
- if (tipc_sk_type_connectionless(sk))
+ /* Reject all unreceived messages if connectionless */
+ if (tipc_sk_type_connectionless(sk)) {
+ tsk_rej_rx_queue(sk, error);
return;
+ }
- if (sk->sk_state != TIPC_DISCONNECTING) {
+ switch (sk->sk_state) {
+ case TIPC_CONNECTING:
+ case TIPC_ESTABLISHED:
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, dnode, tsk->portid);
+ /* Send a FIN+/- to its peer */
+ skb = __skb_dequeue(&sk->sk_receive_queue);
+ if (skb) {
+ __skb_queue_purge(&sk->sk_receive_queue);
+ tipc_sk_respond(sk, skb, error);
+ break;
+ }
skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, error);
if (skb)
tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
- tipc_node_remove_conn(net, dnode, tsk->portid);
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ break;
+ case TIPC_LISTEN:
+ /* Reject all SYN messages */
+ tsk_rej_rx_queue(sk, error);
+ break;
+ default:
+ __skb_queue_purge(&sk->sk_receive_queue);
+ break;
}
}
@@ -540,7 +619,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
* are returned or discarded according to the "destination droppable" setting
* specified for the message by the sender.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_release(struct socket *sock)
{
@@ -557,15 +636,19 @@ static int tipc_release(struct socket *sock)
tsk = tipc_sk(sk);
lock_sock(sk);
+ trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
sk->sk_shutdown = SHUTDOWN_MASK;
- tipc_sk_withdraw(tsk, 0, NULL);
+ tipc_sk_leave(tsk);
+ tipc_sk_withdraw(tsk, NULL);
+ __skb_queue_purge(&tsk->mc_method.deferredq);
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
+ sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
- u32_list_purge(&tsk->cong_links);
+ tipc_dest_list_purge(&tsk->cong_links);
tsk->cong_link_cnt = 0;
call_rcu(&tsk->rcu, tipc_sk_callback);
sock->sk = NULL;
@@ -574,85 +657,96 @@ static int tipc_release(struct socket *sock)
}
/**
- * tipc_bind - associate or disassocate TIPC name(s) with a socket
+ * __tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
- * @uaddr: socket address describing name(s) and desired operation
- * @uaddr_len: size of socket address data structure
+ * @skaddr: socket address describing name(s) and desired operation
+ * @alen: size of socket address data structure
*
- * Name and name sequence binding is indicated using a positive scope value;
+ * Name and name sequence binding are indicated using a positive scope value;
* a negative scope value unbinds the specified name. Specifying no name
* (i.e. a socket address length of 0) unbinds all names from the socket.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
- int uaddr_len)
+static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
- struct sock *sk = sock->sk;
- struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- struct tipc_sock *tsk = tipc_sk(sk);
- int res = -EINVAL;
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
+ struct tipc_sock *tsk = tipc_sk(sock->sk);
+ bool unbind = false;
- lock_sock(sk);
- if (unlikely(!uaddr_len)) {
- res = tipc_sk_withdraw(tsk, 0, NULL);
- goto exit;
- }
+ if (unlikely(!alen))
+ return tipc_sk_withdraw(tsk, NULL);
- if (uaddr_len < sizeof(struct sockaddr_tipc)) {
- res = -EINVAL;
- goto exit;
+ if (ua->addrtype == TIPC_SERVICE_ADDR) {
+ ua->addrtype = TIPC_SERVICE_RANGE;
+ ua->sr.upper = ua->sr.lower;
}
- if (addr->family != AF_TIPC) {
- res = -EAFNOSUPPORT;
- goto exit;
+ if (ua->scope < 0) {
+ unbind = true;
+ ua->scope = -ua->scope;
}
+ /* Users may still use deprecated TIPC_ZONE_SCOPE */
+ if (ua->scope != TIPC_NODE_SCOPE)
+ ua->scope = TIPC_CLUSTER_SCOPE;
- if (addr->addrtype == TIPC_ADDR_NAME)
- addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
- res = -EAFNOSUPPORT;
- goto exit;
- }
+ if (tsk->group)
+ return -EACCES;
- if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
- (addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
- res = -EACCES;
- goto exit;
- }
+ if (unbind)
+ return tipc_sk_withdraw(tsk, ua);
+ return tipc_sk_publish(tsk, ua);
+}
- res = (addr->scope > 0) ?
- tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
- tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
-exit:
- release_sock(sk);
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
+{
+ int res;
+
+ lock_sock(sock->sk);
+ res = __tipc_bind(sock, skaddr, alen);
+ release_sock(sock->sk);
return res;
}
+static int tipc_bind(struct socket *sock, struct sockaddr_unsized *skaddr, int alen)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)skaddr;
+ u32 atype = ua->addrtype;
+
+ if (alen) {
+ if (!tipc_uaddr_valid(ua, alen))
+ return -EINVAL;
+ if (atype == TIPC_SOCKET_ADDR)
+ return -EAFNOSUPPORT;
+ if (ua->sr.type < TIPC_RESERVED_TYPES) {
+ pr_warn_once("Can't bind to reserved service type %u\n",
+ ua->sr.type);
+ return -EACCES;
+ }
+ }
+ return tipc_sk_bind(sock, (struct sockaddr *)skaddr, alen);
+}
+
/**
* tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
- * @uaddr_len: area for returned length of socket address
* @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*
* NOTE: This routine doesn't need to take the socket lock since it only
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+ int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
memset(addr, 0, sizeof(*addr));
if (peer) {
@@ -663,16 +757,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
addr->addr.id.node = tsk_peer_node(tsk);
} else {
addr->addr.id.ref = tsk->portid;
- addr->addr.id.node = tn->own_addr;
+ addr->addr.id.node = tipc_own_addr(sock_net(sk));
}
- *uaddr_len = sizeof(*addr);
- addr->addrtype = TIPC_ADDR_ID;
+ addr->addrtype = TIPC_SOCKET_ADDR;
addr->family = AF_TIPC;
addr->scope = 0;
addr->addr.name.domain = 0;
- return 0;
+ return sizeof(*addr);
}
/**
@@ -681,7 +774,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* @sock: socket for which to calculate the poll bits
* @wait: ???
*
- * Returns pollmask value
+ * Return: pollmask value
*
* COMMENTARY:
* It appears that the usual socket locking mechanisms are not useful here
@@ -693,57 +786,59 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static unsigned int tipc_poll(struct file *file, struct socket *sock,
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- u32 mask = 0;
+ __poll_t revents = 0;
- sock_poll_wait(file, sk_sleep(sk), wait);
+ sock_poll_wait(file, sock, wait);
+ trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+ revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
- mask |= POLLHUP;
+ revents |= EPOLLHUP;
switch (sk->sk_state) {
case TIPC_ESTABLISHED:
if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
- mask |= POLLOUT;
- /* fall thru' */
+ revents |= EPOLLOUT;
+ fallthrough;
case TIPC_LISTEN:
case TIPC_CONNECTING:
- if (!skb_queue_empty(&sk->sk_receive_queue))
- mask |= (POLLIN | POLLRDNORM);
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ revents |= EPOLLIN | EPOLLRDNORM;
break;
case TIPC_OPEN:
- if (!tsk->cong_link_cnt)
- mask |= POLLOUT;
- if (tipc_sk_type_connectionless(sk) &&
- (!skb_queue_empty(&sk->sk_receive_queue)))
- mask |= (POLLIN | POLLRDNORM);
+ if (tsk->group_is_open && !tsk->cong_link_cnt)
+ revents |= EPOLLOUT;
+ if (!tipc_sk_type_connectionless(sk))
+ break;
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
+ break;
+ revents |= EPOLLIN | EPOLLRDNORM;
break;
case TIPC_DISCONNECTING:
- mask = (POLLIN | POLLRDNORM | POLLHUP);
+ revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
break;
}
-
- return mask;
+ return revents;
}
/**
* tipc_sendmcast - send multicast message
* @sock: socket structure
- * @seq: destination address
+ * @ua: destination address struct
* @msg: message to send
* @dlen: length of data to send
* @timeout: timeout to wait for wakeup
*
* Called from function tipc_sendmsg(), which has done all sanity checks
- * Returns the number of bytes sent on success, or errno
+ * Return: the number of bytes sent on success, or errno
*/
-static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
+static int tipc_sendmcast(struct socket *sock, struct tipc_uaddr *ua,
struct msghdr *msg, size_t dlen, long timeout)
{
struct sock *sk = sock->sk;
@@ -751,12 +846,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
int mtu = tipc_bcast_get_mtu(net);
- struct tipc_mc_method *method = &tsk->mc_method;
- u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
struct sk_buff_head pkts;
struct tipc_nlist dsts;
int rc;
+ if (tsk->group)
+ return -EACCES;
+
/* Block or return if any destination link is congested */
rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
if (unlikely(rc))
@@ -764,8 +860,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
/* Lookup destination nodes */
tipc_nlist_init(&dsts, tipc_own_addr(net));
- tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
- seq->upper, domain, &dsts);
+ tipc_nametbl_lookup_mcast_nodes(net, ua, &dsts);
if (!dsts.local && !dsts.remote)
return -EHOSTUNREACH;
@@ -775,18 +870,21 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
- msg_set_nametype(hdr, seq->type);
- msg_set_namelower(hdr, seq->lower);
- msg_set_nameupper(hdr, seq->upper);
+ msg_set_nametype(hdr, ua->sr.type);
+ msg_set_namelower(hdr, ua->sr.lower);
+ msg_set_nameupper(hdr, ua->sr.upper);
/* Build message as chain of buffers */
- skb_queue_head_init(&pkts);
+ __skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
- if (unlikely(rc == dlen))
- rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
+ if (unlikely(rc == dlen)) {
+ trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
+ TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
&tsk->cong_link_cnt);
+ }
tipc_nlist_purge(&dsts);
@@ -794,7 +892,296 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
}
/**
+ * tipc_send_group_msg - send a message to a member in the group
+ * @net: network namespace
+ * @tsk: tipc socket
+ * @m: message to send
+ * @mb: group member
+ * @dnode: destination node
+ * @dport: destination port
+ * @dlen: total length of message data
+ */
+static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
+ struct msghdr *m, struct tipc_member *mb,
+ u32 dnode, u32 dport, int dlen)
+{
+ u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
+ struct tipc_mc_method *method = &tsk->mc_method;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct sk_buff_head pkts;
+ int mtu, rc;
+
+ /* Complete message header */
+ msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
+ msg_set_hdr_sz(hdr, GROUP_H_SIZE);
+ msg_set_destport(hdr, dport);
+ msg_set_destnode(hdr, dnode);
+ msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+ mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+
+ /* Send message */
+ rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+ if (unlikely(rc == -ELINKCONG)) {
+ tipc_dest_push(&tsk->cong_links, dnode, 0);
+ tsk->cong_link_cnt++;
+ }
+
+ /* Update send window */
+ tipc_group_update_member(mb, blks);
+
+ /* A broadcast sent within next EXPIRE period must follow same path */
+ method->rcast = true;
+ method->mandatory = true;
+ return dlen;
+}
+
+/**
+ * tipc_send_group_unicast - send message to a member in the group
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct sock *sk = sock->sk;
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
+ struct tipc_member *mb = NULL;
+ u32 node, port;
+ int rc;
+
+ node = ua->sk.node;
+ port = ua->sk.ref;
+ if (!port && !node)
+ return -EHOSTUNREACH;
+
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(&tsk->cong_links, node, 0) &&
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port, blks,
+ &mb));
+ if (unlikely(rc))
+ return rc;
+
+ if (unlikely(!mb))
+ return -EHOSTUNREACH;
+
+ rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_send_group_anycast - send message to any member with given identity
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct list_head *cong_links = &tsk->cong_links;
+ int blks = tsk_blocks(GROUP_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_member *first = NULL;
+ struct tipc_member *mbr = NULL;
+ struct net *net = sock_net(sk);
+ u32 node, port, exclude;
+ LIST_HEAD(dsts);
+ int lookups = 0;
+ int dstcnt, rc;
+ bool cong;
+
+ ua->sa.type = msg_nametype(hdr);
+ ua->scope = msg_lookup_scope(hdr);
+
+ while (++lookups < 4) {
+ exclude = tipc_group_exclude(tsk->group);
+
+ first = NULL;
+
+ /* Look for a non-congested destination member, if any */
+ while (1) {
+ if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt,
+ exclude, false))
+ return -EHOSTUNREACH;
+ tipc_dest_pop(&dsts, &node, &port);
+ cong = tipc_group_cong(tsk->group, node, port, blks,
+ &mbr);
+ if (!cong)
+ break;
+ if (mbr == first)
+ break;
+ if (!first)
+ first = mbr;
+ }
+
+ /* Start over if destination was not in member list */
+ if (unlikely(!mbr))
+ continue;
+
+ if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
+ break;
+
+ /* Block or return if destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(cong_links, node, 0) &&
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port,
+ blks, &mbr));
+ if (unlikely(rc))
+ return rc;
+
+ /* Send, unless destination disappeared while waiting */
+ if (likely(mbr))
+ break;
+ }
+
+ if (unlikely(lookups >= 4))
+ return -EHOSTUNREACH;
+
+ rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
+
+ return rc ? rc : dlen;
+}
+
+/**
+ * tipc_send_group_bcast - send message to all members in communication group
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct net *net = sock_net(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_nlist *dsts;
+ struct tipc_mc_method *method = &tsk->mc_method;
+ bool ack = method->mandatory && method->rcast;
+ int blks = tsk_blocks(MCAST_H_SIZE + dlen);
+ struct tipc_msg *hdr = &tsk->phdr;
+ int mtu = tipc_bcast_get_mtu(net);
+ struct sk_buff_head pkts;
+ int rc = -EHOSTUNREACH;
+
+ /* Block or return if any destination link or member is congested */
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tsk->cong_link_cnt && tsk->group &&
+ !tipc_group_bc_cong(tsk->group, blks));
+ if (unlikely(rc))
+ return rc;
+
+ dsts = tipc_group_dests(tsk->group);
+ if (!dsts->local && !dsts->remote)
+ return -EHOSTUNREACH;
+
+ /* Complete message header */
+ if (ua) {
+ msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
+ msg_set_nameinst(hdr, ua->sa.instance);
+ } else {
+ msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
+ msg_set_nameinst(hdr, 0);
+ }
+ msg_set_hdr_sz(hdr, GROUP_H_SIZE);
+ msg_set_destport(hdr, 0);
+ msg_set_destnode(hdr, 0);
+ msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
+
+ /* Avoid getting stuck with repeated forced replicasts */
+ msg_set_grp_bc_ack_req(hdr, ack);
+
+ /* Build message as chain of buffers */
+ __skb_queue_head_init(&pkts);
+ rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+ if (unlikely(rc != dlen))
+ return rc;
+
+ /* Send message */
+ rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
+ if (unlikely(rc))
+ return rc;
+
+ /* Update broadcast sequence number and send windows */
+ tipc_group_update_bc_members(tsk->group, blks, ack);
+
+ /* Broadcast link is now free to choose method for next broadcast */
+ method->mandatory = false;
+ method->expires = jiffies;
+
+ return dlen;
+}
+
+/**
+ * tipc_send_group_mcast - send message to all members with given identity
+ * @sock: socket structure
+ * @m: message to send
+ * @dlen: total length of message data
+ * @timeout: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Return: the number of bytes sent on success, or errno
+ */
+static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
+ int dlen, long timeout)
+{
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct net *net = sock_net(sk);
+ u32 dstcnt, exclude;
+ LIST_HEAD(dsts);
+
+ ua->sa.type = msg_nametype(hdr);
+ ua->scope = msg_lookup_scope(hdr);
+ exclude = tipc_group_exclude(grp);
+
+ if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true))
+ return -EHOSTUNREACH;
+
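+	/* A single matching member degenerates into group unicast */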
+ if (dstcnt == 1) {
+ tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref);
+ return tipc_send_group_unicast(sock, m, dlen, timeout);
+ }
+
+ tipc_dest_list_purge(&dsts);
+ return tipc_send_group_bcast(sock, m, dlen, timeout);
+}
+
+/**
* tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
+ * @net: the associated network namespace
* @arrvq: queue with arriving messages, to be cloned after destination lookup
* @inputq: queue with cloned messages, delivered to socket after dest lookup
*
@@ -803,32 +1190,59 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
struct sk_buff_head *inputq)
{
- struct tipc_msg *msg;
- struct list_head dports;
- u32 portid;
- u32 scope = TIPC_CLUSTER_SCOPE;
- struct sk_buff_head tmpq;
- uint hsz;
+ u32 self = tipc_own_addr(net);
struct sk_buff *skb, *_skb;
+ u32 portid, onode;
+ struct sk_buff_head tmpq;
+ struct list_head dports;
+ struct tipc_msg *hdr;
+ struct tipc_uaddr ua;
+ int user, mtyp, hlen;
__skb_queue_head_init(&tmpq);
INIT_LIST_HEAD(&dports);
+ ua.addrtype = TIPC_SERVICE_RANGE;
+ /* tipc_skb_peek() increments the head skb's reference counter */
skb = tipc_skb_peek(arrvq, &inputq->lock);
for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
- msg = buf_msg(skb);
- hsz = skb_headroom(skb) + msg_hdr_sz(msg);
-
- if (in_own_node(net, msg_orignode(msg)))
- scope = TIPC_NODE_SCOPE;
-
- /* Create destination port list and message clones: */
- tipc_nametbl_mc_translate(net,
- msg_nametype(msg), msg_namelower(msg),
- msg_nameupper(msg), scope, &dports);
- portid = u32_pop(&dports);
- for (; portid; portid = u32_pop(&dports)) {
- _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
+ hdr = buf_msg(skb);
+ user = msg_user(hdr);
+ mtyp = msg_type(hdr);
+ hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
+ onode = msg_orignode(hdr);
+ ua.sr.type = msg_nametype(hdr);
+ ua.sr.lower = msg_namelower(hdr);
+ ua.sr.upper = msg_nameupper(hdr);
+ if (onode == self)
+ ua.scope = TIPC_ANY_SCOPE;
+ else
+ ua.scope = TIPC_CLUSTER_SCOPE;
+
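+		/* Group unicast and protocol msgs skip the multicast
+		 * lookup and go straight to the input queue
+		 */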
+ if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
+ spin_lock_bh(&inputq->lock);
+ if (skb_peek(arrvq) == skb) {
+ __skb_dequeue(arrvq);
+ __skb_queue_tail(inputq, skb);
+ }
+ kfree_skb(skb);
+ spin_unlock_bh(&inputq->lock);
+ continue;
+ }
+
+ /* Group messages require exact scope match */
+ if (msg_in_group(hdr)) {
+ ua.sr.lower = 0;
+ ua.sr.upper = ~0;
+ ua.scope = msg_lookup_scope(hdr);
+ }
+
+ /* Create destination port list: */
+ tipc_nametbl_lookup_mcast_sockets(net, &ua, &dports);
+
+ /* Clone message per destination */
+ while (tipc_dest_pop(&dports, NULL, &portid)) {
+ _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
if (_skb) {
msg_set_destport(buf_msg(_skb), portid);
__skb_queue_tail(&tmpq, _skb);
@@ -836,10 +1250,11 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
}
pr_warn("Failed to clone mcast rcv buffer\n");
}
- /* Append to inputq if not already done by other thread */
+ /* Append clones to inputq only if skb is still head of arrvq */
spin_lock_bh(&inputq->lock);
if (skb_peek(arrvq) == skb) {
skb_queue_splice_tail_init(&tmpq, inputq);
+ /* Decrement the skb's refcnt */
kfree_skb(__skb_dequeue(arrvq));
}
spin_unlock_bh(&inputq->lock);
@@ -849,30 +1264,94 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
tipc_sk_rcv(net, inputq);
}
+/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
+ * when socket is in Nagle mode
+ */
+static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
+{
+ struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
+ struct sk_buff *skb = skb_peek_tail(txq);
+ struct net *net = sock_net(&tsk->sk);
+ u32 dnode = tsk_peer_node(tsk);
+ int rc;
+
+ if (nagle_ack) {
+ tsk->pkt_cnt += skb_queue_len(txq);
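+		/* Nagle is counterproductive if fewer than two messages
+		 * were bundled per packet on average; back off
+		 */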
+ if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
+ tsk->oneway = 0;
+ if (tsk->nagle_start < NAGLE_START_MAX)
+ tsk->nagle_start *= 2;
+ tsk->expect_ack = false;
+ pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
+ tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
+ tsk->nagle_start);
+ } else {
+ tsk->nagle_start = NAGLE_START_INIT;
+ if (skb) {
+ msg_set_ack_required(buf_msg(skb));
+ tsk->expect_ack = true;
+ } else {
+ tsk->expect_ack = false;
+ }
+ }
+ tsk->msg_acc = 0;
+ tsk->pkt_cnt = 0;
+ }
+
+ if (!skb || tsk->cong_link_cnt)
+ return;
+
+ /* Do not send SYN again after congestion */
+ if (msg_is_syn(buf_msg(skb)))
+ return;
+
+ if (tsk->msg_acc)
+ tsk->pkt_cnt += skb_queue_len(txq);
+ tsk->snt_unacked += tsk->snd_backlog;
+ tsk->snd_backlog = 0;
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
+ if (rc == -ELINKCONG)
+ tsk->cong_link_cnt = 1;
+}
+
/**
- * tipc_sk_proto_rcv - receive a connection mng protocol message
+ * tipc_sk_conn_proto_rcv - receive a connection mng protocol message
* @tsk: receiving socket
* @skb: pointer to message buffer.
+ * @inputq: buffer list containing the buffers
+ * @xmitq: output message area
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
- struct sk_buff_head *xmitq)
+static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
{
- struct sock *sk = &tsk->sk;
- u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
+ u32 onode = tsk_own_node(tsk);
+ struct sock *sk = &tsk->sk;
int mtyp = msg_type(hdr);
- bool conn_cong;
+ bool was_cong;
/* Ignore if connection cannot be validated: */
- if (!tsk_peer_msg(tsk, hdr))
+ if (!tsk_peer_msg(tsk, hdr)) {
+ trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
goto exit;
+ }
if (unlikely(msg_errcode(hdr))) {
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
- goto exit;
+
+		/* State change is ignored if socket already awake;
+		 * convert msg to an abort msg and add to inputq
+		 */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
}
tsk->probe_unacked = false;
@@ -883,11 +1362,12 @@ static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
__skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
- conn_cong = tsk_conn_cong(tsk);
+ was_cong = tsk_conn_cong(tsk);
+ tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
tsk->snt_unacked -= msg_conn_ack(hdr);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
tsk->snd_win = msg_adv_win(hdr);
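+		/* Wake up writers only on a congested -> uncongested change */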
- if (conn_cong)
+ if (was_cong && !tsk_conn_cong(tsk))
sk->sk_write_space(sk);
} else if (mtyp != CONN_PROBE_REPLY) {
pr_warn("Received unknown CONN_PROTO msg\n");
@@ -907,7 +1387,7 @@ exit:
* and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
* (Note: 'SYN+' is prohibited on SOCK_STREAM.)
*
- * Returns the number of bytes sent on success, or errno otherwise
+ * Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_sendmsg(struct socket *sock,
struct msghdr *m, size_t dsz)
@@ -927,31 +1407,44 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
- DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
+ struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name;
long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
struct list_head *clinks = &tsk->cong_links;
bool syn = !tipc_sk_type_connectionless(sk);
+ struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
- struct tipc_name_seq *seq;
+ struct tipc_socket_addr skaddr;
struct sk_buff_head pkts;
- u32 type, inst, domain;
- u32 dnode, dport;
- int mtu, rc;
+ int atype, mtu, rc;
if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
return -EMSGSIZE;
- if (unlikely(!dest)) {
- dest = &tsk->peer;
- if (!syn || dest->family != AF_TIPC)
- return -EDESTADDRREQ;
- }
-
- if (unlikely(m->msg_namelen < sizeof(*dest)))
+ if (ua) {
+ if (!tipc_uaddr_valid(ua, m->msg_namelen))
+ return -EINVAL;
+ atype = ua->addrtype;
+ }
+
+ /* If socket belongs to a communication group follow other paths */
+ if (grp) {
+ if (!ua)
+ return tipc_send_group_bcast(sock, m, dlen, timeout);
+ if (atype == TIPC_SERVICE_ADDR)
+ return tipc_send_group_anycast(sock, m, dlen, timeout);
+ if (atype == TIPC_SOCKET_ADDR)
+ return tipc_send_group_unicast(sock, m, dlen, timeout);
+ if (atype == TIPC_SERVICE_RANGE)
+ return tipc_send_group_mcast(sock, m, dlen, timeout);
return -EINVAL;
+ }
- if (unlikely(dest->family != AF_TIPC))
- return -EINVAL;
+ if (!ua) {
+ ua = (struct tipc_uaddr *)&tsk->peer;
+ if (!syn && ua->family != AF_TIPC)
+ return -EDESTADDRREQ;
+ atype = ua->addrtype;
+ }
if (unlikely(syn)) {
if (sk->sk_state == TIPC_LISTEN)
@@ -960,61 +1453,75 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
return -EISCONN;
if (tsk->published)
return -EOPNOTSUPP;
- if (dest->addrtype == TIPC_ADDR_NAME) {
- tsk->conn_type = dest->addr.name.name.type;
- tsk->conn_instance = dest->addr.name.name.instance;
- }
+ if (atype == TIPC_SERVICE_ADDR)
+ tsk->conn_addrtype = atype;
+ msg_set_syn(hdr, 1);
}
- seq = &dest->addr.nameseq;
- if (dest->addrtype == TIPC_ADDR_MCAST)
- return tipc_sendmcast(sock, seq, m, dlen, timeout);
+ memset(&skaddr, 0, sizeof(skaddr));
- if (dest->addrtype == TIPC_ADDR_NAME) {
- type = dest->addr.name.name.type;
- inst = dest->addr.name.name.instance;
- domain = dest->addr.name.domain;
- dnode = domain;
- msg_set_type(hdr, TIPC_NAMED_MSG);
- msg_set_hdr_sz(hdr, NAMED_H_SIZE);
- msg_set_nametype(hdr, type);
- msg_set_nameinst(hdr, inst);
- msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
- dport = tipc_nametbl_translate(net, type, inst, &dnode);
- msg_set_destnode(hdr, dnode);
- msg_set_destport(hdr, dport);
- if (unlikely(!dport && !dnode))
+ /* Determine destination */
+ if (atype == TIPC_SERVICE_RANGE) {
+ return tipc_sendmcast(sock, ua, m, dlen, timeout);
+ } else if (atype == TIPC_SERVICE_ADDR) {
+ skaddr.node = ua->lookup_node;
+ ua->scope = tipc_node2scope(skaddr.node);
+ if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
return -EHOSTUNREACH;
-
- } else if (dest->addrtype == TIPC_ADDR_ID) {
- dnode = dest->addr.id.node;
- msg_set_type(hdr, TIPC_DIRECT_MSG);
- msg_set_lookup_scope(hdr, 0);
- msg_set_destnode(hdr, dnode);
- msg_set_destport(hdr, dest->addr.id.ref);
- msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ } else if (atype == TIPC_SOCKET_ADDR) {
+ skaddr = ua->sk;
+ } else {
+ return -EINVAL;
}
/* Block or return if destination link is congested */
- rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tipc_dest_find(clinks, skaddr.node, 0));
if (unlikely(rc))
return rc;
- skb_queue_head_init(&pkts);
- mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+ /* Finally build message header */
+ msg_set_destnode(hdr, skaddr.node);
+ msg_set_destport(hdr, skaddr.ref);
+ if (atype == TIPC_SERVICE_ADDR) {
+ msg_set_type(hdr, TIPC_NAMED_MSG);
+ msg_set_hdr_sz(hdr, NAMED_H_SIZE);
+ msg_set_nametype(hdr, ua->sa.type);
+ msg_set_nameinst(hdr, ua->sa.instance);
+ msg_set_lookup_scope(hdr, ua->scope);
+ } else { /* TIPC_SOCKET_ADDR */
+ msg_set_type(hdr, TIPC_DIRECT_MSG);
+ msg_set_lookup_scope(hdr, 0);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ }
+
+ /* Add message body */
+ __skb_queue_head_init(&pkts);
+ mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
if (unlikely(rc != dlen))
return rc;
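+	/* Keep a clone of the SYN in the write queue for later retransmit */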
+ if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
+ __skb_queue_purge(&pkts);
+ return -ENOMEM;
+ }
- rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+ /* Send message */
+ trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
- u32_push(clinks, dnode);
+ tipc_dest_push(clinks, skaddr.node, 0);
tsk->cong_link_cnt++;
rc = 0;
}
- if (unlikely(syn && !rc))
+ if (unlikely(syn && !rc)) {
tipc_set_sk_state(sk, TIPC_CONNECTING);
+ if (dlen && timeout) {
+ timeout = msecs_to_jiffies(timeout);
+ tipc_wait_for_connect(sock, &timeout);
+ }
+ }
return rc ? rc : dlen;
}
@@ -1027,7 +1534,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
*
* Used for SOCK_STREAM data.
*
- * Returns the number of bytes sent on success (or partial success),
+ * Return: the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
@@ -1047,24 +1554,27 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
struct sock *sk = sock->sk;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ struct sk_buff_head *txq = &sk->sk_write_queue;
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
- struct sk_buff_head pkts;
+ struct sk_buff *skb;
u32 dnode = tsk_peer_node(tsk);
+ int maxnagle = tsk->maxnagle;
+ int maxpkt = tsk->max_pkt;
int send, sent = 0;
- int rc = 0;
-
- skb_queue_head_init(&pkts);
+ int blocks, rc = 0;
if (unlikely(dlen > INT_MAX))
return -EMSGSIZE;
/* Handle implicit connection setup */
- if (unlikely(dest)) {
+ if (unlikely(dest && sk->sk_state == TIPC_OPEN)) {
rc = __tipc_sendmsg(sock, m, dlen);
- if (dlen && (dlen == rc))
+ if (dlen && dlen == rc) {
+ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+ }
return rc;
}
@@ -1075,19 +1585,48 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
tipc_sk_connected(sk)));
if (unlikely(rc))
break;
-
send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
- rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
- if (unlikely(rc != send))
- break;
-
- rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+ blocks = tsk->snd_backlog;
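+		/* Nagle: bundle small messages while traffic is one-way */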
+ if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
+ send <= maxnagle) {
+ rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
+ if (unlikely(rc < 0))
+ break;
+ blocks += rc;
+ tsk->msg_acc++;
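+			/* Hold data back in the backlog while an ack is
+			 * outstanding and the backlog is still small
+			 */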
+ if (blocks <= 64 && tsk->expect_ack) {
+ tsk->snd_backlog = blocks;
+ sent += send;
+ break;
+ } else if (blocks > 64) {
+ tsk->pkt_cnt += skb_queue_len(txq);
+ } else {
+ skb = skb_peek_tail(txq);
+ if (skb) {
+ msg_set_ack_required(buf_msg(skb));
+ tsk->expect_ack = true;
+ } else {
+ tsk->expect_ack = false;
+ }
+ tsk->msg_acc = 0;
+ tsk->pkt_cnt = 0;
+ }
+ } else {
+ rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
+ if (unlikely(rc != send))
+ break;
+ blocks += tsk_inc(tsk, send + MIN_H_SIZE);
+ }
+ trace_tipc_sk_sendstream(sk, skb_peek(txq),
+ TIPC_DUMP_SK_SNDQ, " ");
+ rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tsk->cong_link_cnt = 1;
rc = 0;
}
if (likely(!rc)) {
- tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
+ tsk->snt_unacked += blocks;
+ tsk->snd_backlog = 0;
sent += send;
}
} while (sent < dlen && !rc);
@@ -1103,7 +1642,7 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
*
* Used for SOCK_SEQPACKET messages.
*
- * Returns the number of bytes sent on success, or errno otherwise
+ * Return: the number of bytes sent on success, or errno otherwise
*/
static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
{
@@ -1122,17 +1661,20 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
struct net *net = sock_net(sk);
struct tipc_msg *msg = &tsk->phdr;
+ msg_set_syn(msg, 0);
msg_set_destnode(msg, peer_node);
msg_set_destport(msg, peer_port);
msg_set_type(msg, TIPC_CONN_MSG);
msg_set_lookup_scope(msg, 0);
msg_set_hdr_sz(msg, SHORT_H_SIZE);
- sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
tipc_set_sk_state(sk, TIPC_ESTABLISHED);
tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
- tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
+ tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
+ tsk_set_nagle(tsk);
+ __skb_queue_purge(&sk->sk_write_queue);
if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
return;
@@ -1142,115 +1684,123 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
}
/**
- * set_orig_addr - capture sender's address for received message
+ * tipc_sk_set_orig_addr - capture sender's address for received message
* @m: descriptor for message info
- * @msg: received message header
+ * @skb: received message
*
* Note: Address is not captured if not requested by receiver.
*/
-static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
{
- DECLARE_SOCKADDR(struct sockaddr_tipc *, addr, m->msg_name);
+ DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
+ struct tipc_msg *hdr = buf_msg(skb);
- if (addr) {
- addr->family = AF_TIPC;
- addr->addrtype = TIPC_ADDR_ID;
- memset(&addr->addr, 0, sizeof(addr->addr));
- addr->addr.id.ref = msg_origport(msg);
- addr->addr.id.node = msg_orignode(msg);
- addr->addr.name.domain = 0; /* could leave uninitialized */
- addr->scope = 0; /* could leave uninitialized */
- m->msg_namelen = sizeof(struct sockaddr_tipc);
- }
+ if (!srcaddr)
+ return;
+
+ srcaddr->sock.family = AF_TIPC;
+ srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
+ srcaddr->sock.scope = 0;
+ srcaddr->sock.addr.id.ref = msg_origport(hdr);
+ srcaddr->sock.addr.id.node = msg_orignode(hdr);
+ srcaddr->sock.addr.name.domain = 0;
+ m->msg_namelen = sizeof(struct sockaddr_tipc);
+
+ if (!msg_in_group(hdr))
+ return;
+
+ /* Group message users may also want to know sending member's id */
+ srcaddr->member.family = AF_TIPC;
+ srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
+ srcaddr->member.scope = 0;
+ srcaddr->member.addr.name.name.type = msg_nametype(hdr);
+ srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
+ srcaddr->member.addr.name.domain = 0;
+ m->msg_namelen = sizeof(*srcaddr);
}
/**
* tipc_sk_anc_data_recv - optionally capture ancillary data for received message
* @m: descriptor for message info
- * @msg: received message header
+ * @skb: received message buffer
* @tsk: TIPC port associated with message
*
* Note: Ancillary data is not captured if not requested by receiver.
*
- * Returns 0 if successful, otherwise errno
+ * Return: 0 if successful, otherwise errno
*/
-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
struct tipc_sock *tsk)
{
- u32 anc_data[3];
- u32 err;
- u32 dest_type;
- int has_name;
- int res;
+ struct tipc_msg *hdr;
+ u32 data[3] = {0,};
+ bool has_addr;
+ int dlen, rc;
if (likely(m->msg_controllen == 0))
return 0;
- /* Optionally capture errored message object(s) */
- err = msg ? msg_errcode(msg) : 0;
- if (unlikely(err)) {
- anc_data[0] = err;
- anc_data[1] = msg_data_sz(msg);
- res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
- if (res)
- return res;
- if (anc_data[1]) {
- res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
- msg_data(msg));
- if (res)
- return res;
- }
- }
+ hdr = buf_msg(skb);
+ dlen = msg_data_sz(hdr);
- /* Optionally capture message destination object */
- dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
- switch (dest_type) {
+ /* Capture errored message object, if any */
+ if (msg_errcode(hdr)) {
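+		/* Linearize skb first; msg_data() expects contiguous data */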
+ if (skb_linearize(skb))
+ return -ENOMEM;
+ hdr = buf_msg(skb);
+ data[0] = msg_errcode(hdr);
+ data[1] = dlen;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, data);
+ if (rc || !dlen)
+ return rc;
+ rc = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, dlen, msg_data(hdr));
+ if (rc)
+ return rc;
+ }
+
+ /* Capture TIPC_SERVICE_ADDR/RANGE destination address, if any */
+ switch (msg_type(hdr)) {
case TIPC_NAMED_MSG:
- has_name = 1;
- anc_data[0] = msg_nametype(msg);
- anc_data[1] = msg_namelower(msg);
- anc_data[2] = msg_namelower(msg);
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = data[1];
break;
case TIPC_MCAST_MSG:
- has_name = 1;
- anc_data[0] = msg_nametype(msg);
- anc_data[1] = msg_namelower(msg);
- anc_data[2] = msg_nameupper(msg);
+ has_addr = true;
+ data[0] = msg_nametype(hdr);
+ data[1] = msg_namelower(hdr);
+ data[2] = msg_nameupper(hdr);
break;
case TIPC_CONN_MSG:
- has_name = (tsk->conn_type != 0);
- anc_data[0] = tsk->conn_type;
- anc_data[1] = tsk->conn_instance;
- anc_data[2] = tsk->conn_instance;
+ has_addr = !!tsk->conn_addrtype;
+ data[0] = msg_nametype(&tsk->phdr);
+ data[1] = msg_nameinst(&tsk->phdr);
+ data[2] = data[1];
break;
default:
- has_name = 0;
+ has_addr = false;
}
- if (has_name) {
- res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
- if (res)
- return res;
- }
-
- return 0;
+ if (!has_addr)
+ return 0;
+ return put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, data);
}
-static void tipc_sk_send_ack(struct tipc_sock *tsk)
+static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
{
struct sock *sk = &tsk->sk;
- struct net *net = sock_net(sk);
struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
u32 dnode = tsk_peer_node(tsk);
if (!tipc_sk_connected(sk))
- return;
+ return NULL;
skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
dnode, tsk_own_node(tsk), peer_port,
tsk->portid, TIPC_OK);
if (!skb)
- return;
+ return NULL;
msg = buf_msg(skb);
msg_set_conn_ack(msg, tsk->rcv_unacked);
tsk->rcv_unacked = 0;
@@ -1260,13 +1810,25 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk)
tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
msg_set_adv_win(msg, tsk->rcv_win);
}
- tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+ return skb;
+}
+
+static void tipc_sk_send_ack(struct tipc_sock *tsk)
+{
+ struct sk_buff *skb;
+
+ skb = tipc_sk_build_ack(tsk);
+ if (!skb)
+ return;
+
+ tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
+ msg_link_selector(buf_msg(skb)));
}
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
long timeo = *timeop;
int err = sock_error(sk);
@@ -1274,15 +1836,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
return err;
for (;;) {
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sk->sk_shutdown & RCV_SHUTDOWN) {
err = -ENOTCONN;
break;
}
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+ sched_annotate_sleep();
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -1298,13 +1862,13 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
if (err)
break;
}
- finish_wait(sk_sleep(sk), &wait);
*timeop = timeo;
return err;
}
/**
* tipc_recvmsg - receive packet-oriented message
+ * @sock: network socket
* @m: descriptor for message info
* @buflen: length of user buffer area
* @flags: receive flags
@@ -1312,17 +1876,20 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
* Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
* If the complete message doesn't fit in user area, truncate it.
*
- * Returns size of returned message data, errno otherwise
+ * Return: size of returned message data, errno otherwise
*/
static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
{
struct sock *sk = sock->sk;
- struct tipc_sock *tsk = tipc_sk(sk);
- struct sk_buff *skb;
- struct tipc_msg *hdr;
bool connected = !tipc_sk_type_connectionless(sk);
+ struct tipc_sock *tsk = tipc_sk(sk);
int rc, err, hlen, dlen, copy;
+ struct tipc_skb_cb *skb_cb;
+ struct sk_buff_head xmitq;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ bool grp_evt;
long timeout;
/* Catch invalid receive requests */
@@ -1336,51 +1903,90 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
}
timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ /* Step rcv queue to first msg with data or error; wait if necessary */
do {
- /* Look at first msg in receive queue; wait if necessary */
rc = tipc_wait_for_rcvmsg(sock, &timeout);
if (unlikely(rc))
goto exit;
skb = skb_peek(&sk->sk_receive_queue);
+ skb_cb = TIPC_SKB_CB(skb);
hdr = buf_msg(skb);
dlen = msg_data_sz(hdr);
hlen = msg_hdr_sz(hdr);
err = msg_errcode(hdr);
+ grp_evt = msg_is_grp_evt(hdr);
if (likely(dlen || err))
break;
tsk_advance_rx_queue(sk);
} while (1);
/* Collect msg meta data, including error code and rejected data */
- set_orig_addr(m, hdr);
- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+ tipc_sk_set_orig_addr(m, skb);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (unlikely(rc))
goto exit;
+ hdr = buf_msg(skb);
/* Capture data if non-error msg, otherwise just set return value */
if (likely(!err)) {
- copy = min_t(int, dlen, buflen);
- if (unlikely(copy != dlen))
- m->msg_flags |= MSG_TRUNC;
- rc = skb_copy_datagram_msg(skb, hlen, m, copy);
+ int offset = skb_cb->bytes_read;
+
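+		/* With MSG_EOR a partial read position is kept across
+		 * calls; without it an oversized message is truncated
+		 */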
+ copy = min_t(int, dlen - offset, buflen);
+ rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
+ if (unlikely(rc))
+ goto exit;
+ if (unlikely(offset + copy < dlen)) {
+ if (flags & MSG_EOR) {
+ if (!(flags & MSG_PEEK))
+ skb_cb->bytes_read = offset + copy;
+ } else {
+ m->msg_flags |= MSG_TRUNC;
+ skb_cb->bytes_read = 0;
+ }
+ } else {
+ if (flags & MSG_EOR)
+ m->msg_flags |= MSG_EOR;
+ skb_cb->bytes_read = 0;
+ }
} else {
copy = 0;
rc = 0;
- if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
+ if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control) {
rc = -ECONNRESET;
+ goto exit;
+ }
+ }
+
+ /* Mark message as group event if applicable */
+ if (unlikely(grp_evt)) {
+ if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
+ m->msg_flags |= MSG_EOR;
+ m->msg_flags |= MSG_OOB;
+ copy = 0;
}
- if (unlikely(rc))
- goto exit;
	/* Capture of data or error code/rejected data was successful */
if (unlikely(flags & MSG_PEEK))
goto exit;
+ /* Send group flow control advertisement when applicable */
+ if (tsk->group && msg_in_group(hdr) && !grp_evt) {
+ __skb_queue_head_init(&xmitq);
+ tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
+ msg_orignode(hdr), msg_origport(hdr),
+ &xmitq);
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
+ }
+
+ if (skb_cb->bytes_read)
+ goto exit;
+
tsk_advance_rx_queue(sk);
+
if (likely(!connected))
goto exit;
- /* Send connection flow control ack when applicable */
+ /* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
@@ -1391,6 +1997,7 @@ exit:
/**
* tipc_recvstream - receive stream-oriented data
+ * @sock: network socket
* @m: descriptor for message info
* @buflen: total size of user buffer area
* @flags: receive flags
@@ -1398,7 +2005,7 @@ exit:
* Used for SOCK_STREAM messages only. If not enough data is available
* will optionally wait for more; never truncates data.
*
- * Returns size of returned message data, errno otherwise
+ * Return: size of returned message data, errno otherwise
*/
static int tipc_recvstream(struct socket *sock, struct msghdr *m,
size_t buflen, int flags)
@@ -1446,10 +2053,11 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Collect msg meta data, incl. error code and rejected data */
if (!copied) {
- set_orig_addr(m, hdr);
- rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+ tipc_sk_set_orig_addr(m, skb);
+ rc = tipc_sk_anc_data_recv(m, skb, tsk);
if (rc)
break;
+ hdr = buf_msg(skb);
}
/* Copy data if msg ok, otherwise return error/partial data */
@@ -1481,7 +2089,7 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
/* Send connection flow control advertisement when applicable */
tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
- if (unlikely(tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE))
+ if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
tipc_sk_send_ack(tsk);
/* Exit if all requested data or FIN/error received */
@@ -1505,25 +2113,26 @@ static void tipc_write_space(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
- POLLWRNORM | POLLWRBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
+ EPOLLWRNORM | EPOLLWRBAND);
rcu_read_unlock();
}
/**
* tipc_data_ready - wake up threads to indicate messages have been received
* @sk: socket
- * @len: the length of messages
*/
static void tipc_data_ready(struct sock *sk)
{
struct socket_wq *wq;
+ trace_sk_data_ready(sk);
+
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
- wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
- POLLRDNORM | POLLRDBAND);
+ wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
+ EPOLLRDNORM | EPOLLRDBAND);
rcu_read_unlock();
}
@@ -1532,92 +2141,146 @@ static void tipc_sock_destruct(struct sock *sk)
__skb_queue_purge(&sk->sk_receive_queue);
}
+static void tipc_sk_proto_rcv(struct sock *sk,
+ struct sk_buff_head *inputq,
+ struct sk_buff_head *xmitq)
+{
+ struct sk_buff *skb = __skb_dequeue(inputq);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_msg *hdr = buf_msg(skb);
+ struct tipc_group *grp = tsk->group;
+ bool wakeup = false;
+
+ switch (msg_user(hdr)) {
+ case CONN_MANAGER:
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
+ return;
+ case SOCK_WAKEUP:
+ tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+ /* coupled with smp_rmb() in tipc_wait_for_cond() */
+ smp_wmb();
+ tsk->cong_link_cnt--;
+ wakeup = true;
+ tipc_sk_push_backlog(tsk, false);
+ break;
+ case GROUP_PROTOCOL:
+ tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
+ break;
+ case TOP_SRV:
+ tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
+ hdr, inputq, xmitq);
+ break;
+ default:
+ break;
+ }
+
+ if (wakeup)
+ sk->sk_write_space(sk);
+
+ kfree_skb(skb);
+}
+
/**
- * filter_connect - Handle all incoming messages for a connection-based socket
+ * tipc_sk_filter_connect - check incoming message for a connection-based socket
* @tsk: TIPC socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed
- *
- * Returns true if everything ok, false otherwise
+ * @skb: pointer to message buffer.
+ * @xmitq: for Nagle ACK if any
+ * Return: true if message should be added to receive queue, false otherwise
*/
-static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
+static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
struct tipc_msg *hdr = buf_msg(skb);
- u32 pport = msg_origport(hdr);
- u32 pnode = msg_orignode(hdr);
+ bool con_msg = msg_connected(hdr);
+ u32 pport = tsk_peer_port(tsk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 oport = msg_origport(hdr);
+ u32 onode = msg_orignode(hdr);
+ int err = msg_errcode(hdr);
+ unsigned long delay;
if (unlikely(msg_mcast(hdr)))
return false;
+ tsk->oneway = 0;
switch (sk->sk_state) {
case TIPC_CONNECTING:
- /* Accept only ACK or NACK message */
- if (unlikely(!msg_connected(hdr))) {
- if (pport != tsk_peer_port(tsk) ||
- pnode != tsk_peer_node(tsk))
- return false;
-
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = ECONNREFUSED;
- sk->sk_state_change(sk);
- return true;
- }
-
- if (unlikely(!msg_isdata(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- sk->sk_err = EINVAL;
+ /* Setup ACK */
+ if (likely(con_msg)) {
+ if (err)
+ break;
+ tipc_sk_finish_conn(tsk, oport, onode);
+ msg_set_importance(&tsk->phdr, msg_importance(hdr));
+ /* ACK+ message with data is added to receive queue */
+ if (msg_data_sz(hdr))
+ return true;
+			/* Empty 'ACK-': wake up sleeping connect() and drop */
sk->sk_state_change(sk);
- return true;
+ msg_set_dest_droppable(hdr, 1);
+ return false;
}
+ /* Ignore connectionless message if not from listening socket */
+ if (oport != pport || onode != pnode)
+ return false;
- tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
- msg_set_importance(&tsk->phdr, msg_importance(hdr));
-
- /* If 'ACK+' message, add to socket receive queue */
- if (msg_data_sz(hdr))
- return true;
-
- /* If empty 'ACK-' message, wake up sleeping connect() */
- sk->sk_data_ready(sk);
+ /* Rejected SYN */
+ if (err != TIPC_ERR_OVERLOAD)
+ break;
- /* 'ACK-' message is neither accepted nor rejected: */
- msg_set_dest_droppable(hdr, 1);
+ /* Prepare for new setup attempt if we have a SYN clone */
+ if (skb_queue_empty(&sk->sk_write_queue))
+ break;
+ get_random_bytes(&delay, 2);
+ delay %= (tsk->conn_timeout / 4);
+ delay = msecs_to_jiffies(delay + 100);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
return false;
-
case TIPC_OPEN:
case TIPC_DISCONNECTING:
- break;
+ return false;
case TIPC_LISTEN:
/* Accept only SYN message */
- if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+ if (!msg_is_syn(hdr) &&
+ tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
+ return false;
+ if (!con_msg && !err)
return true;
- break;
+ return false;
case TIPC_ESTABLISHED:
+ if (!skb_queue_empty(&sk->sk_write_queue))
+ tipc_sk_push_backlog(tsk, false);
/* Accept only connection-based messages sent by peer */
- if (unlikely(!tsk_peer_msg(tsk, hdr)))
- return false;
-
- if (unlikely(msg_errcode(hdr))) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- /* Let timer expire on it's own */
- tipc_node_remove_conn(net, tsk_peer_node(tsk),
- tsk->portid);
- sk->sk_state_change(sk);
+ if (likely(con_msg && !err && pport == oport &&
+ pnode == onode)) {
+ if (msg_ack_required(hdr)) {
+ struct sk_buff *skb;
+
+ skb = tipc_sk_build_ack(tsk);
+ if (skb) {
+ msg_set_nagle_ack(buf_msg(skb));
+ __skb_queue_tail(xmitq, skb);
+ }
+ }
+ return true;
}
+ if (!tsk_peer_msg(tsk, hdr))
+ return false;
+ if (!err)
+ return true;
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ tipc_node_remove_conn(net, pnode, tsk->portid);
+ sk->sk_state_change(sk);
return true;
default:
pr_err("Unknown sk_state %u\n", sk->sk_state);
}
-
- return false;
+ /* Abort connection setup attempt */
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNREFUSED;
+ sk->sk_state_change(sk);
+ return true;
}
/**
@@ -1636,125 +2299,115 @@ static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
* TIPC_HIGH_IMPORTANCE (8 MB)
* TIPC_CRITICAL_IMPORTANCE (16 MB)
*
- * Returns overload limit according to corresponding message importance
+ * Return: overload limit according to corresponding message importance
*/
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
{
struct tipc_sock *tsk = tipc_sk(sk);
struct tipc_msg *hdr = buf_msg(skb);
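+	/* sk_rcvbuf may change concurrently; hence READ_ONCE() */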
+ if (unlikely(msg_in_group(hdr)))
+ return READ_ONCE(sk->sk_rcvbuf);
+
if (unlikely(!msg_connected(hdr)))
- return sk->sk_rcvbuf << msg_importance(hdr);
+ return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
- return sk->sk_rcvbuf;
+ return READ_ONCE(sk->sk_rcvbuf);
return FLOWCTL_MSG_LIM;
}
/**
- * filter_rcv - validate incoming message
+ * tipc_sk_filter_rcv - validate incoming message
* @sk: socket
* @skb: pointer to message.
+ * @xmitq: output message area (FIXME)
*
* Enqueues message on receive queue if acceptable; optionally handles
* disconnect indication for a connected socket.
*
* Called with socket lock already taken
- *
- * Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *xmitq)
+static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
+ bool sk_conn = !tipc_sk_type_connectionless(sk);
struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = buf_msg(skb);
- unsigned int limit = rcvbuf_limit(sk, skb);
- int err = TIPC_OK;
- int usr = msg_user(hdr);
- u32 onode;
+ struct net *net = sock_net(sk);
+ struct sk_buff_head inputq;
+ int mtyp = msg_type(hdr);
+ int limit, err = TIPC_OK;
- if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb, xmitq);
- return false;
- }
+ trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
+ TIPC_SKB_CB(skb)->bytes_read = 0;
+ __skb_queue_head_init(&inputq);
+ __skb_queue_tail(&inputq, skb);
- if (unlikely(usr == SOCK_WAKEUP)) {
- onode = msg_orignode(hdr);
- kfree_skb(skb);
- u32_del(&tsk->cong_links, onode);
- tsk->cong_link_cnt--;
- sk->sk_write_space(sk);
- return false;
- }
+ if (unlikely(!msg_isdata(hdr)))
+ tipc_sk_proto_rcv(sk, &inputq, xmitq);
- /* Drop if illegal message type */
- if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
- kfree_skb(skb);
- return false;
- }
+ if (unlikely(grp))
+ tipc_group_filter_msg(grp, &inputq, xmitq);
- /* Reject if wrong message type for current socket state */
- if (tipc_sk_type_connectionless(sk)) {
- if (msg_connected(hdr)) {
+ if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
+ tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
+
+ /* Validate and add to receive buffer if there is space */
+ while ((skb = __skb_dequeue(&inputq))) {
+ hdr = buf_msg(skb);
+ limit = rcvbuf_limit(sk, skb);
+ if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
+ (!sk_conn && msg_connected(hdr)) ||
+ (!grp && msg_in_group(hdr)))
err = TIPC_ERR_NO_PORT;
- goto reject;
+ else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
+ "err_overload2!");
+ sk_drops_inc(sk);
+ err = TIPC_ERR_OVERLOAD;
}
- } else if (unlikely(!filter_connect(tsk, skb))) {
- err = TIPC_ERR_NO_PORT;
- goto reject;
- }
- /* Reject message if there isn't room to queue it */
- if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
- err = TIPC_ERR_OVERLOAD;
- goto reject;
+ if (unlikely(err)) {
+ if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
+ "@filter_rcv!");
+ __skb_queue_tail(xmitq, skb);
+ }
+ err = TIPC_OK;
+ continue;
+ }
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, sk);
+ trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
+ "rcvq >90% allocated!");
+ sk->sk_data_ready(sk);
}
-
- /* Enqueue message */
- TIPC_SKB_CB(skb)->bytes_read = 0;
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- skb_set_owner_r(skb, sk);
-
- sk->sk_data_ready(sk);
- return true;
-
-reject:
- if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
- __skb_queue_tail(xmitq, skb);
- return false;
}
/**
- * tipc_backlog_rcv - handle incoming message from backlog queue
+ * tipc_sk_backlog_rcv - handle incoming message from backlog queue
* @sk: socket
* @skb: message
*
* Caller must hold socket lock
- *
- * Returns 0
*/
-static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
- unsigned int truesize = skb->truesize;
+ unsigned int before = sk_rmem_alloc_get(sk);
struct sk_buff_head xmitq;
- u32 dnode, selector;
+ unsigned int added;
__skb_queue_head_init(&xmitq);
- if (likely(filter_rcv(sk, skb, &xmitq))) {
- atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
- return 0;
- }
-
- if (skb_queue_empty(&xmitq))
- return 0;
+ tipc_sk_filter_rcv(sk, skb, &xmitq);
+ added = sk_rmem_alloc_get(sk) - before;
+ atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
- /* Send response/rejected message */
- skb = __skb_dequeue(&xmitq);
- dnode = msg_destnode(buf_msg(skb));
- selector = msg_origport(buf_msg(skb));
- tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+ /* Send pending response/rejected messages, if any */
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
return 0;
}
@@ -1764,13 +2417,14 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
* @inputq: list of incoming buffers with potentially different destinations
* @sk: socket where the buffers should be enqueued
* @dport: port number for the socket
+ * @xmitq: output queue
*
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
u32 dport, struct sk_buff_head *xmitq)
{
- unsigned long time_limit = jiffies + 2;
+ unsigned long time_limit = jiffies + usecs_to_jiffies(20000);
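+	/* Process for at most ~20 ms per call, independently of HZ */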
struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
@@ -1786,7 +2440,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb, xmitq);
+ tipc_sk_filter_rcv(sk, skb, xmitq);
continue;
}
@@ -1795,19 +2449,28 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
- if (likely(!sk_add_backlog(sk, skb, lim)))
+ if (likely(!sk_add_backlog(sk, skb, lim))) {
+ trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
+ "bklg & rcvq >90% allocated!");
continue;
+ }
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
/* Overload => reject message back to sender */
onode = tipc_own_addr(sock_net(sk));
- if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ sk_drops_inc(sk);
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
+ "@sk_enqueue!");
__skb_queue_tail(xmitq, skb);
+ }
break;
}
}
/**
* tipc_sk_rcv - handle a chain of incoming buffers
+ * @net: the associated network namespace
* @inputq: buffer list containing the buffers
* Consumes all buffers in list until inputq is empty
* Note: may be called in multiple threads referring to the same queue
@@ -1833,14 +2496,10 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
spin_unlock_bh(&sk->sk_lock.slock);
}
/* Send pending response/rejected messages, if any */
- while ((skb = __skb_dequeue(&xmitq))) {
- dnode = msg_destnode(buf_msg(skb));
- tipc_node_xmit_skb(net, skb, dnode, dport);
- }
+ tipc_node_distr_xmit(sock_net(sk), &xmitq);
sock_put(sk);
continue;
}
-
/* No destination socket => dequeue skb if still there */
skb = tipc_skb_dequeue(inputq, dport);
if (!skb)
@@ -1854,6 +2513,8 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
/* Prepare for message rejection */
if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
+
+ trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
dnode = msg_destnode(buf_msg(skb));
tipc_node_xmit_skb(net, skb, dnode, dport);
@@ -1874,15 +2535,27 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
return -ETIMEDOUT;
if (signal_pending(current))
return sock_intr_errno(*timeo_p);
+ if (sk->sk_state == TIPC_DISCONNECTING)
+ break;
add_wait_queue(sk_sleep(sk), &wait);
- done = sk_wait_event(sk, timeo_p,
- sk->sk_state != TIPC_CONNECTING, &wait);
+ done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
+ &wait);
remove_wait_queue(sk_sleep(sk), &wait);
} while (!done);
return 0;
}
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+ if (addr->family != AF_TIPC)
+ return false;
+ if (addr->addrtype == TIPC_SERVICE_RANGE)
+ return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+ return (addr->addrtype == TIPC_SERVICE_ADDR ||
+ addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
/**
* tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
@@ -1890,9 +2563,9 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
* @destlen: size of socket address data structure
* @flags: file-related flags associated with socket
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
-static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+static int tipc_connect(struct socket *sock, struct sockaddr_unsized *dest,
int destlen, int flags)
{
struct sock *sk = sock->sk;
@@ -1903,27 +2576,31 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
int previous;
int res = 0;
+ if (destlen != sizeof(struct sockaddr_tipc))
+ return -EINVAL;
+
lock_sock(sk);
- /* DGRAM/RDM connect(), just save the destaddr */
- if (tipc_sk_type_connectionless(sk)) {
- if (dst->family == AF_UNSPEC) {
- memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
- } else if (destlen != sizeof(struct sockaddr_tipc)) {
- res = -EINVAL;
- } else {
- memcpy(&tsk->peer, dest, destlen);
- }
+ if (tsk->group) {
+ res = -EINVAL;
goto exit;
}
- /*
- * Reject connection attempt using multicast address
- *
- * Note: send_msg() validates the rest of the address fields,
- * so there's no need to do it here
- */
- if (dst->addrtype == TIPC_ADDR_MCAST) {
+ if (dst->family == AF_UNSPEC) {
+ memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
+ if (!tipc_sk_type_connectionless(sk))
+ res = -EINVAL;
+ goto exit;
+ }
+ if (!tipc_sockaddr_is_sane(dst)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ /* DGRAM/RDM connect(), just save the destaddr */
+ if (tipc_sk_type_connectionless(sk)) {
+ memcpy(&tsk->peer, dest, destlen);
+ goto exit;
+ } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
res = -EINVAL;
goto exit;
}
@@ -1935,6 +2612,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
/* Send a 'SYN-' to destination */
m.msg_name = dest;
m.msg_namelen = destlen;
+ iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
/* If connect is in non-blocking case, set MSG_DONTWAIT to
* indicate send_msg() is never blocked.
@@ -1951,7 +2629,7 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
* case is EINPROGRESS, rather than EALREADY.
*/
res = -EINPROGRESS;
- /* fall thru' */
+ fallthrough;
case TIPC_CONNECTING:
if (!timeout) {
if (previous == TIPC_CONNECTING)
@@ -1979,7 +2657,7 @@ exit:
* @sock: socket structure
* @len: (unused)
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_listen(struct socket *sock, int len)
{
@@ -1996,7 +2674,7 @@ static int tipc_listen(struct socket *sock, int len)
static int tipc_wait_for_accept(struct socket *sock, long timeo)
{
struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
int err;
/* True wake-one mechanism for incoming connections: only
@@ -2005,12 +2683,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
* anymore, the common case will execute the loop only once.
*/
for (;;) {
- prepare_to_wait_exclusive(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ add_wait_queue(sk_sleep(sk), &wait);
release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
}
err = 0;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2022,25 +2700,25 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
if (signal_pending(current))
break;
}
- finish_wait(sk_sleep(sk), &wait);
return err;
}
/**
* tipc_accept - wait for connection request
* @sock: listening socket
- * @newsock: new socket that is to be connected
- * @flags: file-related flags associated with socket
+ * @new_sock: new socket that is to be connected
+ * @arg: arguments for accept
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
-static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
- bool kern)
+static int tipc_accept(struct socket *sock, struct socket *new_sock,
+ struct proto_accept_arg *arg)
{
struct sock *new_sk, *sk = sock->sk;
- struct sk_buff *buf;
struct tipc_sock *new_tsock;
+ struct msghdr m = {NULL,};
struct tipc_msg *msg;
+ struct sk_buff *buf;
long timeo;
int res;
@@ -2050,14 +2728,14 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
res = -EINVAL;
goto exit;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
res = tipc_wait_for_accept(sock, timeo);
if (res)
goto exit;
buf = skb_peek(&sk->sk_receive_queue);
- res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
+ res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, arg->kern);
if (res)
goto exit;
security_sk_clone(sock->sk, new_sock->sk);
@@ -2073,31 +2751,31 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
- tsk_rej_rx_queue(new_sk);
+ tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
/* Connect new socket to it's peer */
tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
- tsk_set_importance(new_tsock, msg_importance(msg));
+ tsk_set_importance(new_sk, msg_importance(msg));
if (msg_named(msg)) {
- new_tsock->conn_type = msg_nametype(msg);
- new_tsock->conn_instance = msg_nameinst(msg);
+ new_tsock->conn_addrtype = TIPC_SERVICE_ADDR;
+ msg_set_nametype(&new_tsock->phdr, msg_nametype(msg));
+ msg_set_nameinst(&new_tsock->phdr, msg_nameinst(msg));
}
/*
- * Respond to 'SYN-' by discarding it & returning 'ACK'-.
- * Respond to 'SYN+' by queuing it on new socket.
+ * Respond to 'SYN-' by discarding it & returning 'ACK'.
+ * Respond to 'SYN+' by queuing it on new socket & returning 'ACK'.
*/
if (!msg_data_sz(msg)) {
- struct msghdr m = {NULL,};
-
tsk_advance_rx_queue(sk);
- __tipc_sendstream(new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
skb_set_owner_r(buf, new_sk);
}
+ iov_iter_kvec(&m.msg_iter, ITER_SOURCE, NULL, 0, 0);
+ __tipc_sendstream(new_sock, &m, 0);
release_sock(new_sk);
exit:
release_sock(sk);
@@ -2111,7 +2789,7 @@ exit:
*
* Terminates connection (if necessary), then purges socket's receive queue.
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_shutdown(struct socket *sock, int how)
{
@@ -2123,73 +2801,108 @@ static int tipc_shutdown(struct socket *sock, int how)
lock_sock(sk);
+ trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
release_sock(sk);
return res;
}
-static void tipc_sk_timeout(unsigned long data)
+static void tipc_sk_check_probing_state(struct sock *sk,
+ struct sk_buff_head *list)
{
- struct tipc_sock *tsk = (struct tipc_sock *)data;
- struct sock *sk = &tsk->sk;
- struct sk_buff *skb = NULL;
- u32 peer_port, peer_node;
- u32 own_node = tsk_own_node(tsk);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ u32 pport = tsk_peer_port(tsk);
+ u32 self = tsk_own_node(tsk);
+ u32 oport = tsk->portid;
+ struct sk_buff *skb;
- bh_lock_sock(sk);
- if (!tipc_sk_connected(sk)) {
- bh_unlock_sock(sk);
- goto exit;
+ if (tsk->probe_unacked) {
+ tipc_set_sk_state(sk, TIPC_DISCONNECTING);
+ sk->sk_err = ECONNABORTED;
+ tipc_node_remove_conn(sock_net(sk), pnode, pport);
+ sk->sk_state_change(sk);
+ return;
}
- peer_port = tsk_peer_port(tsk);
- peer_node = tsk_peer_node(tsk);
+ /* Prepare new probe */
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
+ pnode, self, pport, oport, TIPC_OK);
+ if (skb)
+ __skb_queue_tail(list, skb);
+ tsk->probe_unacked = true;
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
+}
- if (tsk->probe_unacked) {
- if (!sock_owned_by_user(sk)) {
- tipc_set_sk_state(sk, TIPC_DISCONNECTING);
- tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
- tsk_peer_port(tsk));
- sk->sk_state_change(sk);
- } else {
- /* Try again later */
- sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
- }
+static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
+{
+ struct tipc_sock *tsk = tipc_sk(sk);
+
+ /* Try again later if dest link is congested */
+ if (tsk->cong_link_cnt) {
+ sk_reset_timer(sk, &sk->sk_timer,
+ jiffies + msecs_to_jiffies(100));
+ return;
+ }
+ /* Prepare SYN for retransmit */
+ tipc_msg_skb_clone(&sk->sk_write_queue, list);
+}
+
+static void tipc_sk_timeout(struct timer_list *t)
+{
+ struct sock *sk = timer_container_of(sk, t, sk_timer);
+ struct tipc_sock *tsk = tipc_sk(sk);
+ u32 pnode = tsk_peer_node(tsk);
+ struct sk_buff_head list;
+ int rc = 0;
+ __skb_queue_head_init(&list);
+ bh_lock_sock(sk);
+
+ /* Try again later if socket is busy */
+ if (sock_owned_by_user(sk)) {
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
bh_unlock_sock(sk);
- goto exit;
+ sock_put(sk);
+ return;
}
- skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
- INT_H_SIZE, 0, peer_node, own_node,
- peer_port, tsk->portid, TIPC_OK);
- tsk->probe_unacked = true;
- sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTERVAL);
+ if (sk->sk_state == TIPC_ESTABLISHED)
+ tipc_sk_check_probing_state(sk, &list);
+ else if (sk->sk_state == TIPC_CONNECTING)
+ tipc_sk_retry_connect(sk, &list);
+
bh_unlock_sock(sk);
- if (skb)
- tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
-exit:
+
+ if (!skb_queue_empty(&list))
+ rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
+
+ /* SYN messages may cause link congestion */
+ if (rc == -ELINKCONG) {
+ tipc_dest_push(&tsk->cong_links, pnode, 0);
+ tsk->cong_link_cnt = 1;
+ }
sock_put(sk);
}
-static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq)
+static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
struct sock *sk = &tsk->sk;
struct net *net = sock_net(sk);
- struct publication *publ;
+ struct tipc_socket_addr skaddr;
+ struct publication *p;
u32 key;
if (tipc_sk_connected(sk))
@@ -2197,47 +2910,49 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
key = tsk->portid + tsk->pub_count + 1;
if (key == tsk->portid)
return -EADDRINUSE;
-
- publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
- scope, tsk->portid, key);
- if (unlikely(!publ))
+ skaddr.ref = tsk->portid;
+ skaddr.node = tipc_own_addr(net);
+ p = tipc_nametbl_publish(net, ua, &skaddr, key);
+ if (unlikely(!p))
return -EINVAL;
- list_add(&publ->pport_list, &tsk->publications);
+ list_add(&p->binding_sock, &tsk->publications);
tsk->pub_count++;
- tsk->published = 1;
+ tsk->published = true;
return 0;
}
-static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
- struct tipc_name_seq const *seq)
+static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
{
struct net *net = sock_net(&tsk->sk);
- struct publication *publ;
- struct publication *safe;
+ struct publication *safe, *p;
+ struct tipc_uaddr _ua;
int rc = -EINVAL;
- list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
- if (seq) {
- if (publ->scope != scope)
- continue;
- if (publ->type != seq->type)
- continue;
- if (publ->lower != seq->lower)
- continue;
- if (publ->upper != seq->upper)
- break;
- tipc_nametbl_withdraw(net, publ->type, publ->lower,
- publ->ref, publ->key);
- rc = 0;
- break;
+ list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
+ if (!ua) {
+ tipc_uaddr(&_ua, TIPC_SERVICE_RANGE, p->scope,
+ p->sr.type, p->sr.lower, p->sr.upper);
+ tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key);
+ continue;
}
- tipc_nametbl_withdraw(net, publ->type, publ->lower,
- publ->ref, publ->key);
+ /* Unbind specific publication */
+ if (p->scope != ua->scope)
+ continue;
+ if (p->sr.type != ua->sr.type)
+ continue;
+ if (p->sr.lower != ua->sr.lower)
+ continue;
+ if (p->sr.upper != ua->sr.upper)
+ break;
+ tipc_nametbl_withdraw(net, ua, &p->sk, p->key);
rc = 0;
+ break;
}
- if (list_empty(&tsk->publications))
+ if (list_empty(&tsk->publications)) {
tsk->published = 0;
+ rc = 0;
+ }
return rc;
}
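
The publish/withdraw pair above is what backs the userspace bind()/unbind semantics: binding an AF_TIPC socket to a service range publishes it in the distributed name table, and binding with a NULL address and zero length withdraws all of the socket's publications. A minimal userspace sketch, assuming only the uapi definitions in <linux/tipc.h> (service type and range values are illustrative):

	#include <linux/tipc.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Hedged sketch: publish service type 18888, instances 17..42,
	 * cluster-wide (all values illustrative, error handling omitted).
	 */
	int bind_service_range(int sd)
	{
		struct sockaddr_tipc addr;

		memset(&addr, 0, sizeof(addr));
		addr.family = AF_TIPC;
		addr.addrtype = TIPC_SERVICE_RANGE;	/* ex TIPC_ADDR_NAMESEQ */
		addr.scope = TIPC_CLUSTER_SCOPE;
		addr.addr.nameseq.type = 18888;
		addr.addr.nameseq.lower = 17;
		addr.addr.nameseq.upper = 42;
		return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
	}

Withdrawing everything is then simply bind(sd, NULL, 0), which arrives in tipc_sk_withdraw() above with ua == NULL.
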
@@ -2254,20 +2969,24 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_enter(&tn->sk_rht, &iter);
do {
- tsk = ERR_PTR(rhashtable_walk_start(&iter));
- if (tsk)
- continue;
+ rhashtable_walk_start(&iter);
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
- spin_lock_bh(&tsk->sk.sk_lock.slock);
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(&iter);
+ lock_sock(&tsk->sk);
msg = &tsk->phdr;
- msg_set_prevnode(msg, tn->own_addr);
- msg_set_orignode(msg, tn->own_addr);
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ msg_set_prevnode(msg, tipc_own_addr(net));
+ msg_set_orignode(msg, tipc_own_addr(net));
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(&iter);
+ sock_put(&tsk->sk);
}
rhashtable_walk_stop(&iter);
} while (tsk == ERR_PTR(-EAGAIN));
+
+ rhashtable_walk_exit(&iter);
}
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -2276,7 +2995,7 @@ static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
struct tipc_sock *tsk;
rcu_read_lock();
- tsk = rhashtable_lookup_fast(&tn->sk_rht, &portid, tsk_rht_params);
+ tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
if (tsk)
sock_hold(&tsk->sk);
rcu_read_unlock();
@@ -2290,7 +3009,7 @@ static int tipc_sk_insert(struct tipc_sock *tsk)
struct net *net = sock_net(sk);
struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
- u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
+ u32 portid = get_random_u32_below(remaining) + TIPC_MIN_PORT;
while (remaining--) {
portid++;
@@ -2312,10 +3031,8 @@ static void tipc_sk_remove(struct tipc_sock *tsk)
struct sock *sk = &tsk->sk;
struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
- if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
- WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
+ if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params))
__sock_put(sk);
- }
}
static const struct rhashtable_params tsk_rht_params = {
@@ -2345,6 +3062,63 @@ void tipc_sk_rht_destroy(struct net *net)
rhashtable_destroy(&tn->sk_rht);
}
+static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
+{
+ struct net *net = sock_net(&tsk->sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_msg *hdr = &tsk->phdr;
+ struct tipc_uaddr ua;
+ int rc;
+
+ if (mreq->type < TIPC_RESERVED_TYPES)
+ return -EACCES;
+ if (mreq->scope > TIPC_NODE_SCOPE)
+ return -EINVAL;
+ if (mreq->scope != TIPC_NODE_SCOPE)
+ mreq->scope = TIPC_CLUSTER_SCOPE;
+ if (grp)
+ return -EACCES;
+ grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
+ if (!grp)
+ return -ENOMEM;
+ tsk->group = grp;
+ msg_set_lookup_scope(hdr, mreq->scope);
+ msg_set_nametype(hdr, mreq->type);
+ msg_set_dest_droppable(hdr, true);
+ tipc_uaddr(&ua, TIPC_SERVICE_RANGE, mreq->scope,
+ mreq->type, mreq->instance, mreq->instance);
+ tipc_nametbl_build_group(net, grp, &ua);
+ rc = tipc_sk_publish(tsk, &ua);
+ if (rc) {
+ tipc_group_delete(net, grp);
+ tsk->group = NULL;
+ return rc;
+ }
+ /* Eliminate any risk that a broadcast overtakes sent JOINs */
+ tsk->mc_method.rcast = true;
+ tsk->mc_method.mandatory = true;
+ tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
+ return rc;
+}
+
+static int tipc_sk_leave(struct tipc_sock *tsk)
+{
+ struct net *net = sock_net(&tsk->sk);
+ struct tipc_group *grp = tsk->group;
+ struct tipc_uaddr ua;
+ int scope;
+
+ if (!grp)
+ return -EINVAL;
+ ua.addrtype = TIPC_SERVICE_RANGE;
+ tipc_group_self(grp, &ua.sr, &scope);
+ ua.scope = scope;
+ tipc_group_delete(net, grp);
+ tsk->group = NULL;
+ tipc_sk_withdraw(tsk, &ua);
+ return 0;
+}
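
tipc_sk_join() is reached via setsockopt(TIPC_GROUP_JOIN) below; leaving is the mirror image with TIPC_GROUP_LEAVE and no option value. A hedged userspace sketch using the uapi struct tipc_group_req (type and instance values are illustrative; SOL_TIPC is defined in the kernel's linux/socket.h, so it is guarded here in case libc does not provide it):

	#include <linux/tipc.h>
	#include <string.h>
	#include <sys/socket.h>

	#ifndef SOL_TIPC
	#define SOL_TIPC 271	/* from linux/socket.h */
	#endif

	/* Hedged sketch: join group (service type) 4711 as member
	 * instance 1, cluster scope; values illustrative.
	 */
	int join_group(int sd)
	{
		struct tipc_group_req mreq;

		memset(&mreq, 0, sizeof(mreq));
		mreq.type = 4711;
		mreq.instance = 1;
		mreq.scope = TIPC_CLUSTER_SCOPE;
		return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN,
				  &mreq, sizeof(mreq));
	}

Leaving the group is setsockopt(sd, SOL_TIPC, TIPC_GROUP_LEAVE, NULL, 0), which the set path below routes into tipc_sk_leave().
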
+
/**
* tipc_setsockopt - set socket option
* @sock: socket structure
@@ -2356,13 +3130,14 @@ void tipc_sk_rht_destroy(struct net *net)
* For stream sockets only, accepts and ignores all IPPROTO_TCP options
* (to ease compatibility).
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
- char __user *ov, unsigned int ol)
+ sockptr_t ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
+ struct tipc_group_req mreq;
u32 value = 0;
int res = 0;
@@ -2376,14 +3151,20 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
case TIPC_SRC_DROPPABLE:
case TIPC_DEST_DROPPABLE:
case TIPC_CONN_TIMEOUT:
+ case TIPC_NODELAY:
if (ol < sizeof(value))
return -EINVAL;
- res = get_user(value, (u32 __user *)ov);
- if (res)
- return res;
+ if (copy_from_sockptr(&value, ov, sizeof(u32)))
+ return -EFAULT;
+ break;
+ case TIPC_GROUP_JOIN:
+ if (ol < sizeof(mreq))
+ return -EINVAL;
+ if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
+ return -EFAULT;
break;
default:
- if (ov || ol)
+ if (!sockptr_is_null(ov) || ol)
return -EINVAL;
}
@@ -2391,7 +3172,7 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
switch (opt) {
case TIPC_IMPORTANCE:
- res = tsk_set_importance(tsk, value);
+ res = tsk_set_importance(sk, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
@@ -2413,6 +3194,16 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
tsk->mc_method.rcast = true;
tsk->mc_method.mandatory = true;
break;
+ case TIPC_GROUP_JOIN:
+ res = tipc_sk_join(tsk, &mreq);
+ break;
+ case TIPC_GROUP_LEAVE:
+ res = tipc_sk_leave(tsk);
+ break;
+ case TIPC_NODELAY:
+ tsk->nodelay = !!value;
+ tsk_set_nagle(tsk);
+ break;
default:
res = -EINVAL;
}
@@ -2433,14 +3224,15 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
* For stream sockets only, returns 0 length result for all IPPROTO_TCP options
* (to ease compatibility).
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- int len;
+ struct tipc_service_range seq;
+ int len, scope;
u32 value;
int res;
@@ -2474,6 +3266,15 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
case TIPC_SOCK_RECVQ_DEPTH:
value = skb_queue_len(&sk->sk_receive_queue);
break;
+ case TIPC_SOCK_RECVQ_USED:
+ value = sk_rmem_alloc_get(sk);
+ break;
+ case TIPC_GROUP_JOIN:
+ seq.type = 0;
+ if (tsk->group)
+ tipc_group_self(tsk->group, &seq, &scope);
+ value = seq.type;
+ break;
default:
res = -EINVAL;
}
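
A hedged userspace sketch exercising two of the options handled above: TIPC_NODELAY on the set path and the new read-only TIPC_SOCK_RECVQ_USED on the get path (error handling reduced to return codes; SOL_TIPC guarded as before):

	#include <linux/tipc.h>
	#include <sys/socket.h>

	#ifndef SOL_TIPC
	#define SOL_TIPC 271	/* from linux/socket.h */
	#endif

	/* Hedged sketch: bypass Nagle-style bundling, then read how much
	 * receive buffer memory the socket currently holds.
	 */
	int sample_opts(int sd)
	{
		unsigned int used = 0;
		socklen_t len = sizeof(used);
		int one = 1;

		if (setsockopt(sd, SOL_TIPC, TIPC_NODELAY, &one, sizeof(one)))
			return -1;
		return getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_USED,
				  &used, &len);
	}
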
@@ -2494,7 +3295,8 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
- struct sock *sk = sock->sk;
+ struct net *net = sock_net(sock->sk);
+ struct tipc_sioc_nodeid_req nr = {0};
struct tipc_sioc_ln_req lnr;
void __user *argp = (void __user *)arg;
@@ -2502,7 +3304,7 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCGETLINKNAME:
if (copy_from_user(&lnr, argp, sizeof(lnr)))
return -EFAULT;
- if (!tipc_node_get_linkname(sock_net(sk),
+ if (!tipc_node_get_linkname(net,
lnr.bearer_id & 0xffff, lnr.peer,
lnr.linkname, TIPC_MAX_LINK_NAME)) {
if (copy_to_user(argp, &lnr, sizeof(lnr)))
@@ -2510,6 +3312,14 @@ static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return 0;
}
return -EADDRNOTAVAIL;
+ case SIOCGETNODEID:
+ if (copy_from_user(&nr, argp, sizeof(nr)))
+ return -EFAULT;
+ if (!tipc_node_get_id(net, nr.peer, nr.node_id))
+ return -EADDRNOTAVAIL;
+ if (copy_to_user(argp, &nr, sizeof(nr)))
+ return -EFAULT;
+ return 0;
default:
return -ENOIOCTLCMD;
}
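
The new SIOCGETNODEID branch resolves a peer's 32-bit hash address into its 128-bit node identity. A hedged userspace sketch using the uapi struct tipc_sioc_nodeid_req (linux/sockios.h supplies SIOCPROTOPRIVATE, which the ioctl number builds on):

	#include <linux/tipc.h>
	#include <linux/sockios.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Hedged sketch: translate a peer node hash address (e.g. from
	 * sockaddr_tipc.addr.id.node) into the 128-bit node identity.
	 */
	int get_node_id(int sd, unsigned int peer, char id[TIPC_NODEID_LEN])
	{
		struct tipc_sioc_nodeid_req nr;

		memset(&nr, 0, sizeof(nr));
		nr.peer = peer;
		if (ioctl(sd, SIOCGETNODEID, &nr) < 0)
			return -1;
		memcpy(id, nr.node_id, TIPC_NODEID_LEN);
		return 0;
	}
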
@@ -2522,12 +3332,12 @@ static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
u32 onode = tipc_own_addr(sock_net(sock1->sk));
tsk1->peer.family = AF_TIPC;
- tsk1->peer.addrtype = TIPC_ADDR_ID;
+ tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
tsk1->peer.scope = TIPC_NODE_SCOPE;
tsk1->peer.addr.id.ref = tsk2->portid;
tsk1->peer.addr.id.node = onode;
tsk2->peer.family = AF_TIPC;
- tsk2->peer.addrtype = TIPC_ADDR_ID;
+ tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
tsk2->peer.scope = TIPC_NODE_SCOPE;
tsk2->peer.addr.id.ref = tsk1->portid;
tsk2->peer.addr.id.node = onode;
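
tipc_socketpair() simply cross-wires the two sockets' port ids on the own node, so the standard socketpair() call works for AF_TIPC as well. A hedged sketch:

	#include <sys/socket.h>

	/* Hedged sketch: AF_TIPC socketpair; the kernel connects the two
	 * sockets to each other via their port ids as shown above.
	 */
	int make_pair(int sv[2])
	{
		return socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv);
	}
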
@@ -2557,7 +3367,6 @@ static const struct proto_ops msg_ops = {
.sendmsg = tipc_sendmsg,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct proto_ops packet_ops = {
@@ -2578,7 +3387,6 @@ static const struct proto_ops packet_ops = {
.sendmsg = tipc_send_packet,
.recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct proto_ops stream_ops = {
@@ -2599,7 +3407,6 @@ static const struct proto_ops stream_ops = {
.sendmsg = tipc_sendstream,
.recvmsg = tipc_recvstream,
.mmap = sock_no_mmap,
- .sendpage = sock_no_sendpage
};
static const struct net_proto_family tipc_family_ops = {
@@ -2618,7 +3425,7 @@ static struct proto tipc_proto = {
/**
* tipc_socket_init - initialize TIPC socket interface
*
- * Returns 0 on success, errno otherwise
+ * Return: 0 on success, errno otherwise
*/
int tipc_socket_init(void)
{
@@ -2652,26 +3459,29 @@ void tipc_socket_stop(void)
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
{
- u32 peer_node;
- u32 peer_port;
+ u32 peer_node, peer_port;
+ u32 conn_type, conn_instance;
struct nlattr *nest;
peer_node = tsk_peer_node(tsk);
peer_port = tsk_peer_port(tsk);
-
- nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
+ nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
+ if (!nest)
+ return -EMSGSIZE;
if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
goto msg_full;
if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
goto msg_full;
- if (tsk->conn_type != 0) {
+ if (tsk->conn_addrtype != 0) {
if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
goto msg_full;
- if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
+ if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, conn_type))
goto msg_full;
- if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
+ if (nla_put_u32(skb, TIPC_NLA_CON_INST, conn_instance))
goto msg_full;
}
nla_nest_end(skb, nest);
@@ -2684,38 +3494,45 @@ msg_full:
return -EMSGSIZE;
}
+static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
+ *tsk)
+{
+ struct net *net = sock_net(skb->sk);
+ struct sock *sk = &tsk->sk;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
+ return -EMSGSIZE;
+
+ if (tipc_sk_connected(sk)) {
+ if (__tipc_nl_add_sk_con(skb, tsk))
+ return -EMSGSIZE;
+ } else if (!list_empty(&tsk->publications)) {
+ if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
+ return -EMSGSIZE;
+ }
+ return 0;
+}
+
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
struct tipc_sock *tsk)
{
- int err;
- void *hdr;
struct nlattr *attrs;
- struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sock *sk = &tsk->sk;
+ void *hdr;
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
&tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
if (!hdr)
goto msg_cancel;
- attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
if (!attrs)
goto genlmsg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
- goto attr_msg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
+
+ if (__tipc_nl_add_sk_info(skb, tsk))
goto attr_msg_cancel;
- if (tipc_sk_connected(sk)) {
- err = __tipc_nl_add_sk_con(skb, tsk);
- if (err)
- goto attr_msg_cancel;
- } else if (!list_empty(&tsk->publications)) {
- if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
- goto attr_msg_cancel;
- }
nla_nest_end(skb, attrs);
genlmsg_end(skb, hdr);
@@ -2729,44 +3546,149 @@ msg_cancel:
return -EMSGSIZE;
}
-int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+ int (*skb_handler)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk))
{
- int err;
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
- const struct bucket_table *tbl;
- struct rhash_head *pos;
- struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- u32 tbl_id = cb->args[0];
- u32 prev_portid = cb->args[1];
+ int err;
- rcu_read_lock();
- tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
- for (; tbl_id < tbl->size; tbl_id++) {
- rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
- spin_lock_bh(&tsk->sk.sk_lock.slock);
- if (prev_portid && prev_portid != tsk->portid) {
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ rhashtable_walk_start(iter);
+ while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+ if (IS_ERR(tsk)) {
+ if (PTR_ERR(tsk) == -EAGAIN)
continue;
- }
+ break;
+ }
- err = __tipc_nl_add_sk(skb, cb, tsk);
- if (err) {
- prev_portid = tsk->portid;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
- goto out;
- }
- prev_portid = 0;
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(iter);
+ lock_sock(&tsk->sk);
+ err = skb_handler(skb, cb, tsk);
+ if (err) {
+ release_sock(&tsk->sk);
+ sock_put(&tsk->sk);
+ goto out;
}
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(iter);
+ sock_put(&tsk->sk);
}
+ rhashtable_walk_stop(iter);
out:
- rcu_read_unlock();
- cb->args[0] = tbl_id;
- cb->args[1] = prev_portid;
-
return skb->len;
}
+EXPORT_SYMBOL(tipc_nl_sk_walk);
+
+int tipc_dump_start(struct netlink_callback *cb)
+{
+ return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+ /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+ struct rhashtable_iter *iter = (void *)cb->args[4];
+ struct tipc_net *tn = tipc_net(net);
+
+ if (!iter) {
+ iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+
+ cb->args[4] = (long)iter;
+ }
+
+ rhashtable_walk_enter(&tn->sk_rht, iter);
+ return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+ struct rhashtable_iter *hti = (void *)cb->args[4];
+
+ rhashtable_walk_exit(hti);
+ kfree(hti);
+ return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
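
tipc_dump_start() and tipc_dump_done() bracket a resumable rhashtable walk across netlink dump invocations, with the iterator parked in cb->args[4]. A hedged kernel-side sketch of how a dumper wires them together (my_sk_handler is a hypothetical placeholder; net/tipc/diag.c follows the same pattern):

	/* Hedged sketch of the dump lifecycle. */
	static int my_sk_handler(struct sk_buff *skb, struct netlink_callback *cb,
				 struct tipc_sock *tsk)
	{
		/* emit one record for 'tsk'; return 0 to continue, <0 to stop */
		return 0;
	}

	static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
	{
		return tipc_nl_sk_walk(skb, cb, my_sk_handler);
	}

	static const struct netlink_dump_control ctrl = {
		.start = tipc_dump_start,  /* enters the walk, iter in cb->args[4] */
		.dump  = my_dump,          /* invoked repeatedly until it returns 0 */
		.done  = tipc_dump_done,   /* exits the walk, frees the iterator */
	};
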
+
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
+ u64 (*tipc_diag_gen_cookie)(struct sock *sk))
+{
+ struct sock *sk = &tsk->sk;
+ struct nlattr *attrs;
+ struct nlattr *stat;
+
+	/* Filter response w.r.t. sk_state */
+ if (!(sk_filter_state & (1 << sk->sk_state)))
+ return 0;
+
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
+ if (!attrs)
+ goto msg_cancel;
+
+ if (__tipc_nl_add_sk_info(skb, tsk))
+ goto attr_msg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_UID,
+ from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
+ sk_uid(sk))) ||
+ nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
+ tipc_diag_gen_cookie(sk),
+ TIPC_NLA_SOCK_PAD))
+ goto attr_msg_cancel;
+
+ stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
+ if (!stat)
+ goto attr_msg_cancel;
+
+ if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
+ skb_queue_len(&sk->sk_receive_queue)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
+ skb_queue_len(&sk->sk_write_queue)) ||
+ nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+ sk_drops_read(sk)))
+ goto stat_msg_cancel;
+
+ if (tsk->cong_link_cnt &&
+ nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
+ goto stat_msg_cancel;
+
+ if (tsk_conn_cong(tsk) &&
+ nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
+ goto stat_msg_cancel;
+
+ nla_nest_end(skb, stat);
+
+ if (tsk->group)
+ if (tipc_group_fill_sock_diag(tsk->group, skb))
+ goto stat_msg_cancel;
+
+ nla_nest_end(skb, attrs);
+
+ return 0;
+
+stat_msg_cancel:
+ nla_nest_cancel(skb, stat);
+attr_msg_cancel:
+ nla_nest_cancel(skb, attrs);
+msg_cancel:
+ return -EMSGSIZE;
+}
+EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
+
+int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
+}
/* Caller should hold socket lock for the passed tipc socket. */
static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
@@ -2781,17 +3703,17 @@ static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
if (!hdr)
goto msg_cancel;
- attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
+ attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
if (!attrs)
goto genlmsg_cancel;
if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
goto attr_msg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
goto attr_msg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
goto attr_msg_cancel;
- if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
+ if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
goto attr_msg_cancel;
nla_nest_end(skb, attrs);
@@ -2816,11 +3738,11 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
struct publication *p;
if (*last_publ) {
- list_for_each_entry(p, &tsk->publications, pport_list) {
+ list_for_each_entry(p, &tsk->publications, binding_sock) {
if (p->key == *last_publ)
break;
}
- if (p->key != *last_publ) {
+ if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
/* We never set seq or call nl_dump_check_consistent()
* this means that setting prev_seq here will cause the
		 * consistency check to fail in the netlink callback
@@ -2833,10 +3755,10 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
}
} else {
p = list_first_entry(&tsk->publications, struct publication,
- pport_list);
+ binding_sock);
}
- list_for_each_entry_from(p, &tsk->publications, pport_list) {
+ list_for_each_entry_from(p, &tsk->publications, binding_sock) {
err = __tipc_nl_add_sk_publ(skb, cb, p);
if (err) {
*last_publ = p->key;
@@ -2858,19 +3780,15 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct tipc_sock *tsk;
if (!tsk_portid) {
- struct nlattr **attrs;
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_SOCK])
return -EINVAL;
- err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
- attrs[TIPC_NLA_SOCK],
- tipc_nl_sock_policy, NULL);
+ err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
+ attrs[TIPC_NLA_SOCK],
+ tipc_nl_sock_policy, NULL);
if (err)
return err;
@@ -2900,3 +3818,191 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+
+/**
+ * tipc_sk_filtering - check if a socket should be traced
+ * @sk: the socket to be examined
+ *
+ * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
+ * (portid, sock type, name type, name lower, name upper)
+ *
+ * Return: true if the socket matches the tuple (a field value of 0
+ * means 'any'), or when no tuple is set at all (all fields 0);
+ * otherwise false
+ */
+bool tipc_sk_filtering(struct sock *sk)
+{
+ struct tipc_sock *tsk;
+ struct publication *p;
+ u32 _port, _sktype, _type, _lower, _upper;
+ u32 type = 0, lower = 0, upper = 0;
+
+ if (!sk)
+ return true;
+
+ tsk = tipc_sk(sk);
+
+ _port = sysctl_tipc_sk_filter[0];
+ _sktype = sysctl_tipc_sk_filter[1];
+ _type = sysctl_tipc_sk_filter[2];
+ _lower = sysctl_tipc_sk_filter[3];
+ _upper = sysctl_tipc_sk_filter[4];
+
+ if (!_port && !_sktype && !_type && !_lower && !_upper)
+ return true;
+
+ if (_port)
+ return (_port == tsk->portid);
+
+ if (_sktype && _sktype != sk->sk_type)
+ return false;
+
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ if (p) {
+ type = p->sr.type;
+ lower = p->sr.lower;
+ upper = p->sr.upper;
+ }
+ }
+
+ if (!tipc_sk_type_connectionless(sk)) {
+ type = msg_nametype(&tsk->phdr);
+ lower = msg_nameinst(&tsk->phdr);
+ upper = lower;
+ }
+
+ if ((_type && _type != type) || (_lower && _lower != lower) ||
+ (_upper && _upper != upper))
+ return false;
+
+ return true;
+}
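
Since sk_filter is exposed through procfs (see the sysctl table in sysctl.c further down), the tuple can be set from userspace with a plain write of five whitespace-separated values. A hedged sketch, assuming the sysctl is registered under net/tipc; the service type is illustrative:

	#include <stdio.h>

	/* Hedged sketch: trace only sockets bound to service type 18888;
	 * 0 acts as a wildcard for the remaining tuple fields.
	 */
	int set_sk_filter(void)
	{
		FILE *f = fopen("/proc/sys/net/tipc/sk_filter", "w");

		if (!f)
			return -1;
		/* (portid, sock type, name type, name lower, name upper) */
		fprintf(f, "0 0 18888 0 4294967295");
		return fclose(f);
	}
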
+
+u32 tipc_sock_get_portid(struct sock *sk)
+{
+ return (sk) ? (tipc_sk(sk))->portid : 0;
+}
+
+/**
+ * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
+ * both the rcv and backlog queues are considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
+{
+ atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+ unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+ unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
+ * only the rcv queue is considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Return: true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned int lim = rcvbuf_limit(sk, skb);
+ unsigned int qsize = sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_dump - dump TIPC socket
+ * @sk: tipc sk to be dumped
+ * @dqueues: bitmask selecting which socket queues, if any, to dump:
+ * - TIPC_DUMP_NONE: don't dump socket queues
+ * - TIPC_DUMP_SK_SNDQ: dump socket send queue
+ * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
+ * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
+ * - TIPC_DUMP_ALL: dump all the socket queues above
+ * @buf: returned buffer holding the formatted dump data
+ */
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
+ u32 conn_type, conn_instance;
+ struct tipc_sock *tsk;
+ struct publication *p;
+ bool tsk_connected;
+
+ if (!sk) {
+ i += scnprintf(buf, sz, "sk data: (null)\n");
+ return i;
+ }
+
+ tsk = tipc_sk(sk);
+ tsk_connected = !tipc_sk_type_connectionless(sk);
+
+ i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
+ i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
+ i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
+ if (tsk_connected) {
+ i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
+ conn_type = msg_nametype(&tsk->phdr);
+ conn_instance = msg_nameinst(&tsk->phdr);
+ i += scnprintf(buf + i, sz - i, " %u", conn_type);
+ i += scnprintf(buf + i, sz - i, " %u", conn_instance);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
+ i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
+ i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
+ i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
+
+ if (dqueues & TIPC_DUMP_SK_SNDQ) {
+ i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
+ i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_RCVQ) {
+ i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
+ i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_BKLGQ) {
+ i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
+ i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
+ if (sk->sk_backlog.tail != sk->sk_backlog.head) {
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(sk->sk_backlog.tail, false,
+ buf + i);
+ }
+ }
+
+ return i;
+}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 06fb5944cf76..02cdf166807d 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -49,6 +49,8 @@
#define RCVBUF_DEF (FLOWCTL_BLK_SZ * 1024 * 2)
#define RCVBUF_MAX (FLOWCTL_BLK_SZ * 1024 * 16)
+struct tipc_sock;
+
int tipc_socket_init(void);
void tipc_socket_stop(void);
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
@@ -59,5 +61,20 @@ int tipc_sk_rht_init(struct net *net);
void tipc_sk_rht_destroy(struct net *net);
int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tipc_sock *tsk, u32 sk_filter_state,
+ u64 (*tipc_diag_gen_cookie)(struct sock *sk));
+int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
+ int (*skb_handler)(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
+u32 tipc_sock_get_portid(struct sock *sk);
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
+int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen);
+int tsk_set_importance(struct sock *sk, int imp);
#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0bf91cd3733c..f8490d94e323 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -1,8 +1,9 @@
/*
* net/tipc/subscr.c: TIPC network topology service
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2017, Ericsson AB
* Copyright (c) 2005-2007, 2010-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,359 +39,145 @@
#include "name_table.h"
#include "subscr.h"
-/**
- * struct tipc_subscriber - TIPC network topology subscriber
- * @kref: reference counter to tipc_subscription object
- * @conid: connection identifier to server connecting to subscriber
- * @lock: control access to subscriber
- * @subscrp_list: list of subscription objects for this subscriber
- */
-struct tipc_subscriber {
- struct kref kref;
- int conid;
- spinlock_t lock;
- struct list_head subscrp_list;
-};
-
-static void tipc_subscrp_delete(struct tipc_subscription *sub);
-static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
-
-/**
- * htohl - convert value to endianness used by destination
- * @in: value to convert
- * @swap: non-zero if endianness must be reversed
- *
- * Returns converted value
- */
-static u32 htohl(u32 in, int swap)
+static void tipc_sub_send_event(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event)
{
- return swap ? swab32(in) : in;
-}
-
-static void tipc_subscrp_send_event(struct tipc_subscription *sub,
- u32 found_lower, u32 found_upper,
- u32 event, u32 port_ref, u32 node)
-{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
- struct tipc_subscriber *subscriber = sub->subscriber;
- struct kvec msg_sect;
+ struct tipc_subscr *s = &sub->evt.s;
+ struct tipc_event *evt = &sub->evt;
- msg_sect.iov_base = (void *)&sub->evt;
- msg_sect.iov_len = sizeof(struct tipc_event);
- sub->evt.event = htohl(event, sub->swap);
- sub->evt.found_lower = htohl(found_lower, sub->swap);
- sub->evt.found_upper = htohl(found_upper, sub->swap);
- sub->evt.port.ref = htohl(port_ref, sub->swap);
- sub->evt.port.node = htohl(node, sub->swap);
- tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL,
- msg_sect.iov_base, msg_sect.iov_len);
+ if (sub->inactive)
+ return;
+ tipc_evt_write(evt, event, event);
+ if (p) {
+ tipc_evt_write(evt, found_lower, p->sr.lower);
+ tipc_evt_write(evt, found_upper, p->sr.upper);
+ tipc_evt_write(evt, port.ref, p->sk.ref);
+ tipc_evt_write(evt, port.node, p->sk.node);
+ } else {
+ tipc_evt_write(evt, found_lower, s->seq.lower);
+ tipc_evt_write(evt, found_upper, s->seq.upper);
+ tipc_evt_write(evt, port.ref, 0);
+ tipc_evt_write(evt, port.node, 0);
+ }
+ tipc_topsrv_queue_evt(sub->net, sub->conid, event, evt);
}
/**
- * tipc_subscrp_check_overlap - test for subscription overlap with the
- * given values
+ * tipc_sub_check_overlap - test for subscription overlap with the given values
+ * @subscribed: the service range subscribed for
+ * @found: the service range we are checking for match
*
- * Returns 1 if there is overlap, otherwise 0.
+ * Returns true if there is overlap, otherwise false.
*/
-int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
- u32 found_upper)
-{
- if (found_lower < seq->lower)
- found_lower = seq->lower;
- if (found_upper > seq->upper)
- found_upper = seq->upper;
- if (found_lower > found_upper)
- return 0;
- return 1;
-}
-
-u32 tipc_subscrp_convert_seq_type(u32 type, int swap)
+static bool tipc_sub_check_overlap(struct tipc_service_range *subscribed,
+ struct tipc_service_range *found)
{
- return htohl(type, swap);
-}
+ u32 found_lower = found->lower;
+ u32 found_upper = found->upper;
-void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
- struct tipc_name_seq *out)
-{
- out->type = htohl(in->type, swap);
- out->lower = htohl(in->lower, swap);
- out->upper = htohl(in->upper, swap);
+ if (found_lower < subscribed->lower)
+ found_lower = subscribed->lower;
+ if (found_upper > subscribed->upper)
+ found_upper = subscribed->upper;
+ return found_lower <= found_upper;
}
-void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
- u32 found_upper, u32 event, u32 port_ref,
- u32 node, int must)
+void tipc_sub_report_overlap(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event, bool must)
{
- struct tipc_name_seq seq;
+ struct tipc_service_range *sr = &sub->s.seq;
+ u32 filter = sub->s.filter;
- tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
- if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
+ if (!tipc_sub_check_overlap(sr, &p->sr))
return;
- if (!must &&
- !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
+ if (!must && !(filter & TIPC_SUB_PORTS))
return;
-
- tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
- node);
-}
-
-static void tipc_subscrp_timeout(unsigned long data)
-{
- struct tipc_subscription *sub = (struct tipc_subscription *)data;
- struct tipc_subscriber *subscriber = sub->subscriber;
-
- spin_lock_bh(&subscriber->lock);
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- spin_unlock_bh(&subscriber->lock);
-
- /* Notify subscriber of timeout */
- tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
- TIPC_SUBSCR_TIMEOUT, 0, 0);
-
- tipc_subscrp_put(sub);
-}
-
-static void tipc_subscrb_kref_release(struct kref *kref)
-{
- kfree(container_of(kref,struct tipc_subscriber, kref));
+ if (filter & TIPC_SUB_CLUSTER_SCOPE && p->scope == TIPC_NODE_SCOPE)
+ return;
+ if (filter & TIPC_SUB_NODE_SCOPE && p->scope != TIPC_NODE_SCOPE)
+ return;
+ spin_lock(&sub->lock);
+ tipc_sub_send_event(sub, p, event);
+ spin_unlock(&sub->lock);
}
-static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
+static void tipc_sub_timeout(struct timer_list *t)
{
- kref_put(&subscriber->kref, tipc_subscrb_kref_release);
-}
+ struct tipc_subscription *sub = timer_container_of(sub, t, timer);
-static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
-{
- kref_get(&subscriber->kref);
+ spin_lock(&sub->lock);
+ tipc_sub_send_event(sub, NULL, TIPC_SUBSCR_TIMEOUT);
+ sub->inactive = true;
+ spin_unlock(&sub->lock);
}
-static void tipc_subscrp_kref_release(struct kref *kref)
+static void tipc_sub_kref_release(struct kref *kref)
{
- struct tipc_subscription *sub = container_of(kref,
- struct tipc_subscription,
- kref);
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
- struct tipc_subscriber *subscriber = sub->subscriber;
-
- atomic_dec(&tn->subscription_count);
- kfree(sub);
- tipc_subscrb_put(subscriber);
+ kfree(container_of(kref, struct tipc_subscription, kref));
}
-void tipc_subscrp_put(struct tipc_subscription *subscription)
+void tipc_sub_put(struct tipc_subscription *subscription)
{
- kref_put(&subscription->kref, tipc_subscrp_kref_release);
+ kref_put(&subscription->kref, tipc_sub_kref_release);
}
-void tipc_subscrp_get(struct tipc_subscription *subscription)
+void tipc_sub_get(struct tipc_subscription *subscription)
{
kref_get(&subscription->kref);
}
-/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
- * subscriptions for a given subscriber.
- */
-static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
- struct tipc_subscr *s)
-{
- struct list_head *subscription_list = &subscriber->subscrp_list;
- struct tipc_subscription *sub, *temp;
-
- spin_lock_bh(&subscriber->lock);
- list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
- if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
- continue;
-
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- tipc_subscrp_delete(sub);
-
- if (s)
- break;
- }
- spin_unlock_bh(&subscriber->lock);
-}
-
-static struct tipc_subscriber *tipc_subscrb_create(int conid)
-{
- struct tipc_subscriber *subscriber;
-
- subscriber = kzalloc(sizeof(*subscriber), GFP_ATOMIC);
- if (!subscriber) {
- pr_warn("Subscriber rejected, no memory\n");
- return NULL;
- }
- INIT_LIST_HEAD(&subscriber->subscrp_list);
- kref_init(&subscriber->kref);
- subscriber->conid = conid;
- spin_lock_init(&subscriber->lock);
-
- return subscriber;
-}
-
-static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
-{
- tipc_subscrb_subscrp_delete(subscriber, NULL);
- tipc_subscrb_put(subscriber);
-}
-
-static void tipc_subscrp_delete(struct tipc_subscription *sub)
+struct tipc_subscription *tipc_sub_subscribe(struct net *net,
+ struct tipc_subscr *s,
+ int conid)
{
- u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
-
- if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
- tipc_subscrp_put(sub);
-}
-
-static void tipc_subscrp_cancel(struct tipc_subscr *s,
- struct tipc_subscriber *subscriber)
-{
- tipc_subscrb_subscrp_delete(subscriber, s);
-}
-
-static struct tipc_subscription *tipc_subscrp_create(struct net *net,
- struct tipc_subscr *s,
- int swap)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ u32 lower = tipc_sub_read(s, seq.lower);
+ u32 upper = tipc_sub_read(s, seq.upper);
+ u32 filter = tipc_sub_read(s, filter);
struct tipc_subscription *sub;
- u32 filter = htohl(s->filter, swap);
+ u32 timeout;
- /* Refuse subscription if global limit exceeded */
- if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
- pr_warn("Subscription rejected, limit reached (%u)\n",
- TIPC_MAX_SUBSCRIPTIONS);
+ if ((filter & TIPC_SUB_PORTS && filter & TIPC_SUB_SERVICE) ||
+ lower > upper) {
+ pr_warn("Subscription rejected, illegal request\n");
return NULL;
}
-
- /* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
pr_warn("Subscription rejected, no memory\n");
return NULL;
}
-
- /* Initialize subscription object */
+ INIT_LIST_HEAD(&sub->service_list);
+ INIT_LIST_HEAD(&sub->sub_list);
sub->net = net;
- if (((filter & TIPC_SUB_PORTS) && (filter & TIPC_SUB_SERVICE)) ||
- (htohl(s->seq.lower, swap) > htohl(s->seq.upper, swap))) {
- pr_warn("Subscription rejected, illegal request\n");
+ sub->conid = conid;
+ sub->inactive = false;
+ memcpy(&sub->evt.s, s, sizeof(*s));
+ sub->s.seq.type = tipc_sub_read(s, seq.type);
+ sub->s.seq.lower = lower;
+ sub->s.seq.upper = upper;
+ sub->s.filter = filter;
+ sub->s.timeout = tipc_sub_read(s, timeout);
+ memcpy(sub->s.usr_handle, s->usr_handle, 8);
+ spin_lock_init(&sub->lock);
+ kref_init(&sub->kref);
+ if (!tipc_nametbl_subscribe(sub)) {
kfree(sub);
return NULL;
}
-
- sub->swap = swap;
- memcpy(&sub->evt.s, s, sizeof(*s));
- atomic_inc(&tn->subscription_count);
- kref_init(&sub->kref);
- return sub;
-}
-
-static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
- struct tipc_subscriber *subscriber, int swap)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_subscription *sub = NULL;
- u32 timeout;
-
- sub = tipc_subscrp_create(net, s, swap);
- if (!sub)
- return tipc_conn_terminate(tn->topsrv, subscriber->conid);
-
- spin_lock_bh(&subscriber->lock);
- list_add(&sub->subscrp_list, &subscriber->subscrp_list);
- sub->subscriber = subscriber;
- tipc_nametbl_subscribe(sub);
- tipc_subscrb_get(subscriber);
- spin_unlock_bh(&subscriber->lock);
-
- setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
- timeout = htohl(sub->evt.s.timeout, swap);
-
+ timer_setup(&sub->timer, tipc_sub_timeout, 0);
+ timeout = tipc_sub_read(&sub->evt.s, timeout);
if (timeout != TIPC_WAIT_FOREVER)
mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ return sub;
}
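
From userspace, the subscription flow above is driven through an ordinary connection to the built-in topology service. A hedged sketch, assuming sd is an AF_TIPC SOCK_SEQPACKET socket and using an illustrative service type; the subscription is written in host endianness, which the server infers from the filter field as the tipc_sub_read() calls above show:

	#include <linux/tipc.h>
	#include <string.h>
	#include <sys/socket.h>

	/* Hedged sketch: subscribe for service type 18888 and block until
	 * the first matching topology event arrives.
	 */
	int wait_for_service(int sd)
	{
		struct sockaddr_tipc topsrv;
		struct tipc_subscr sub;
		struct tipc_event evt;

		memset(&topsrv, 0, sizeof(topsrv));
		topsrv.family = AF_TIPC;
		topsrv.addrtype = TIPC_SERVICE_ADDR;
		topsrv.addr.name.name.type = TIPC_TOP_SRV;
		topsrv.addr.name.name.instance = TIPC_TOP_SRV;
		if (connect(sd, (struct sockaddr *)&topsrv, sizeof(topsrv)))
			return -1;

		memset(&sub, 0, sizeof(sub));
		sub.seq.type = 18888;		/* illustrative */
		sub.seq.lower = 0;
		sub.seq.upper = ~0;
		sub.timeout = TIPC_WAIT_FOREVER;
		sub.filter = TIPC_SUB_SERVICE;
		if (send(sd, &sub, sizeof(sub), 0) != sizeof(sub))
			return -1;
		return recv(sd, &evt, sizeof(evt), 0) == sizeof(evt) ? 0 : -1;
	}
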
-/* Handle one termination request for the subscriber */
-static void tipc_subscrb_release_cb(int conid, void *usr_data)
-{
- tipc_subscrb_delete((struct tipc_subscriber *)usr_data);
-}
-
-/* Handle one request to create a new subscription for the subscriber */
-static void tipc_subscrb_rcv_cb(struct net *net, int conid,
- struct sockaddr_tipc *addr, void *usr_data,
- void *buf, size_t len)
-{
- struct tipc_subscriber *subscriber = usr_data;
- struct tipc_subscr *s = (struct tipc_subscr *)buf;
- int swap;
-
- /* Determine subscriber's endianness */
- swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
- TIPC_SUB_CANCEL));
-
- /* Detect & process a subscription cancellation request */
- if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
- s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
- return tipc_subscrp_cancel(s, subscriber);
- }
-
- tipc_subscrp_subscribe(net, s, subscriber, swap);
-}
-
-/* Handle one request to establish a new subscriber */
-static void *tipc_subscrb_connect_cb(int conid)
-{
- return (void *)tipc_subscrb_create(conid);
-}
-
-int tipc_topsrv_start(struct net *net)
-{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- const char name[] = "topology_server";
- struct tipc_server *topsrv;
- struct sockaddr_tipc *saddr;
-
- saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC);
- if (!saddr)
- return -ENOMEM;
- saddr->family = AF_TIPC;
- saddr->addrtype = TIPC_ADDR_NAMESEQ;
- saddr->addr.nameseq.type = TIPC_TOP_SRV;
- saddr->addr.nameseq.lower = TIPC_TOP_SRV;
- saddr->addr.nameseq.upper = TIPC_TOP_SRV;
- saddr->scope = TIPC_NODE_SCOPE;
-
- topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC);
- if (!topsrv) {
- kfree(saddr);
- return -ENOMEM;
- }
- topsrv->net = net;
- topsrv->saddr = saddr;
- topsrv->imp = TIPC_CRITICAL_IMPORTANCE;
- topsrv->type = SOCK_SEQPACKET;
- topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr);
- topsrv->tipc_conn_recvmsg = tipc_subscrb_rcv_cb;
- topsrv->tipc_conn_new = tipc_subscrb_connect_cb;
- topsrv->tipc_conn_release = tipc_subscrb_release_cb;
-
- strncpy(topsrv->name, name, strlen(name) + 1);
- tn->topsrv = topsrv;
- atomic_set(&tn->subscription_count, 0);
-
- return tipc_server_start(topsrv);
-}
-
-void tipc_topsrv_stop(struct net *net)
+void tipc_sub_unsubscribe(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct tipc_server *topsrv = tn->topsrv;
-
- tipc_server_stop(topsrv);
- kfree(topsrv->saddr);
- kfree(topsrv);
+ tipc_nametbl_unsubscribe(sub);
+ if (sub->evt.s.timeout != TIPC_WAIT_FOREVER)
+ timer_delete_sync(&sub->timer);
+ list_del(&sub->sub_list);
+ tipc_sub_put(sub);
}
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ee52957dc952..60b877531b66 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -1,8 +1,9 @@
/*
* net/tipc/subscr.h: Include file for TIPC network topology service
*
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2017, Ericsson AB
* Copyright (c) 2005-2007, 2012-2013, Wind River Systems
+ * Copyright (c) 2020-2021, Red Hat Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,48 +38,85 @@
#ifndef _TIPC_SUBSCR_H
#define _TIPC_SUBSCR_H
-#include "server.h"
+#include "topsrv.h"
-#define TIPC_MAX_SUBSCRIPTIONS 65535
-#define TIPC_MAX_PUBLICATIONS 65535
+#define TIPC_MAX_SUBSCR 65535
+#define TIPC_MAX_PUBL 65535
+struct publication;
struct tipc_subscription;
-struct tipc_subscriber;
+struct tipc_conn;
/**
* struct tipc_subscription - TIPC network topology subscription object
- * @subscriber: pointer to its subscriber
- * @seq: name sequence associated with subscription
- * @net: point to network namespace
- * @timer: timer governing subscription duration (optional)
- * @nameseq_list: adjacent subscriptions in name sequence's subscription list
- * @subscrp_list: adjacent subscriptions in subscriber's subscription list
- * @swap: indicates if subscriber uses opposite endianness in its messages
+ * @s: host-endian copy of the user subscription
* @evt: template for events generated by subscription
+ * @kref: reference count for this subscription
+ * @net: network namespace associated with subscription
+ * @timer: timer governing subscription duration (optional)
+ * @service_list: adjacent subscriptions in name sequence's subscription list
+ * @sub_list: adjacent subscriptions in subscriber's subscription list
+ * @conid: connection identifier of topology server
+ * @inactive: true if this subscription is inactive
+ * @lock: serialize up/down and timer events
*/
struct tipc_subscription {
+ struct tipc_subscr s;
+ struct tipc_event evt;
struct kref kref;
- struct tipc_subscriber *subscriber;
struct net *net;
struct timer_list timer;
- struct list_head nameseq_list;
- struct list_head subscrp_list;
- int swap;
- struct tipc_event evt;
+ struct list_head service_list;
+ struct list_head sub_list;
+ int conid;
+ bool inactive;
+ spinlock_t lock;
};
-int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
- u32 found_upper);
-void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
- u32 found_lower, u32 found_upper, u32 event,
- u32 port_ref, u32 node, int must);
-void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
- struct tipc_name_seq *out);
-u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
-int tipc_topsrv_start(struct net *net);
-void tipc_topsrv_stop(struct net *net);
+struct tipc_subscription *tipc_sub_subscribe(struct net *net,
+ struct tipc_subscr *s,
+ int conid);
+void tipc_sub_unsubscribe(struct tipc_subscription *sub);
+void tipc_sub_report_overlap(struct tipc_subscription *sub,
+ struct publication *p,
+ u32 event, bool must);
+
+int __net_init tipc_topsrv_init_net(struct net *net);
+void __net_exit tipc_topsrv_exit_net(struct net *net);
+
+void tipc_sub_put(struct tipc_subscription *subscription);
+void tipc_sub_get(struct tipc_subscription *subscription);
-void tipc_subscrp_put(struct tipc_subscription *subscription);
-void tipc_subscrp_get(struct tipc_subscription *subscription);
+#define TIPC_FILTER_MASK (TIPC_SUB_PORTS | TIPC_SUB_SERVICE | TIPC_SUB_CANCEL)
+
+/* tipc_sub_read - return field_ of struct sub_ in host endian format
+ */
+#define tipc_sub_read(sub_, field_) \
+ ({ \
+ struct tipc_subscr *sub__ = sub_; \
+ u32 val__ = (sub__)->field_; \
+ int swap_ = !((sub__)->filter & TIPC_FILTER_MASK); \
+ (swap_ ? swab32(val__) : val__); \
+ })
+
+/* tipc_sub_write - write val_ to field_ of struct sub_ in user endian format
+ */
+#define tipc_sub_write(sub_, field_, val_) \
+ ({ \
+ struct tipc_subscr *sub__ = sub_; \
+ u32 val__ = val_; \
+ int swap_ = !((sub__)->filter & TIPC_FILTER_MASK); \
+ (sub__)->field_ = swap_ ? swab32(val__) : val__; \
+ })
+
+/* tipc_evt_write - write val_ to field_ of struct evt_ in user endian format
+ */
+#define tipc_evt_write(evt_, field_, val_) \
+ ({ \
+ struct tipc_event *evt__ = evt_; \
+ u32 val__ = val_; \
+ int swap_ = !((evt__)->s.filter & (TIPC_FILTER_MASK)); \
+ (evt__)->field_ = swap_ ? swab32(val__) : val__; \
+ })
#endif
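
These macros replace the old per-subscription 'swap' flag: a valid filter always has at least one TIPC_FILTER_MASK bit set in the subscriber's own byte order, so a zero result after masking implies the peer's endianness is opposite to ours. A tiny userspace illustration of the same inference (demo_sub_read() is a hypothetical stand-in, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* TIPC_SUB_PORTS | TIPC_SUB_SERVICE | TIPC_SUB_CANCEL */
	#define DEMO_FILTER_MASK 0x7u

	/* Hypothetical helper mirroring tipc_sub_read(): return 'val' in
	 * host endianness, inferring a swap when the filter has no mask
	 * bits set in our byte order.
	 */
	static uint32_t demo_sub_read(uint32_t filter, uint32_t val)
	{
		int swap = !(filter & DEMO_FILTER_MASK);

		return swap ? __builtin_bswap32(val) : val;
	}

	int main(void)
	{
		/* Values as written by an opposite-endian subscriber */
		uint32_t filter = __builtin_bswap32(0x2); /* TIPC_SUB_SERVICE */
		uint32_t lower = __builtin_bswap32(17);

		printf("lower=%u\n", demo_sub_read(filter, lower)); /* 17 */
		return 0;
	}
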
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 1a779b1e8510..30d2e06e3d8c 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -34,7 +34,9 @@
*/
#include "core.h"
-
+#include "trace.h"
+#include "crypto.h"
+#include "bcast.h"
#include <linux/sysctl.h>
static struct ctl_table_header *tipc_ctl_hdr;
@@ -45,16 +47,50 @@ static struct ctl_table tipc_table[] = {
.data = &sysctl_tipc_rmem,
.maxlen = sizeof(sysctl_tipc_rmem),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
},
{
.procname = "named_timeout",
.data = &sysctl_tipc_named_timeout,
.maxlen = sizeof(sysctl_tipc_named_timeout),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "sk_filter",
+ .data = &sysctl_tipc_sk_filter,
+ .maxlen = sizeof(sysctl_tipc_sk_filter),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+#ifdef CONFIG_TIPC_CRYPTO
+ {
+ .procname = "max_tfms",
+ .data = &sysctl_tipc_max_tfms,
+ .maxlen = sizeof(sysctl_tipc_max_tfms),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+ {
+ .procname = "key_exchange_enabled",
+ .data = &sysctl_tipc_key_exchange_enabled,
+ .maxlen = sizeof(sysctl_tipc_key_exchange_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif
+ {
+ .procname = "bc_retruni",
+ .data = &sysctl_tipc_bc_retruni,
+ .maxlen = sizeof(sysctl_tipc_bc_retruni),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
},
- {}
};
int tipc_register_sysctl(void)
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
new file mode 100644
index 000000000000..aad7f96b6009
--- /dev/null
+++ b/net/tipc/topsrv.c
@@ -0,0 +1,733 @@
+/*
+ * net/tipc/topsrv.c: TIPC server infrastructure
+ *
+ * Copyright (c) 2012-2013, Wind River Systems
+ * Copyright (c) 2017-2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "subscr.h"
+#include "topsrv.h"
+#include "core.h"
+#include "socket.h"
+#include "addr.h"
+#include "msg.h"
+#include "bearer.h"
+#include <net/sock.h>
+#include <linux/module.h>
+#include <trace/events/sock.h>
+
+/* Number of messages to send before rescheduling */
+#define MAX_SEND_MSG_COUNT 25
+#define MAX_RECV_MSG_COUNT 25
+#define CF_CONNECTED 1
+
+#define TIPC_SERVER_NAME_LEN 32
+
+/**
+ * struct tipc_topsrv - TIPC server structure
+ * @conn_idr: identifier set of connections
+ * @idr_lock: protects the connection identifier set
+ * @idr_in_use: number of allocated identifier entries
+ * @net: network namespace instance
+ * @awork: accept work item
+ * @rcv_wq: receive workqueue
+ * @send_wq: send workqueue
+ * @listener: topsrv listener socket
+ * @name: server name
+ */
+struct tipc_topsrv {
+ struct idr conn_idr;
+ spinlock_t idr_lock; /* for idr list */
+ int idr_in_use;
+ struct net *net;
+ struct work_struct awork;
+ struct workqueue_struct *rcv_wq;
+ struct workqueue_struct *send_wq;
+ struct socket *listener;
+ char name[TIPC_SERVER_NAME_LEN];
+};
+
+/**
+ * struct tipc_conn - TIPC connection structure
+ * @kref: reference counter to connection object
+ * @conid: connection identifier
+ * @sock: socket handler associated with connection
+ * @flags: indicates connection state
+ * @server: pointer to connected server
+ * @sub_list: list to all pertaining subscriptions
+ * @sub_lock: lock protecting the subscription list
+ * @rwork: receive work item
+ * @outqueue: list of outbound messages waiting to be sent
+ * @outqueue_lock: control access to the outqueue
+ * @swork: send work item
+ */
+struct tipc_conn {
+ struct kref kref;
+ int conid;
+ struct socket *sock;
+ unsigned long flags;
+ struct tipc_topsrv *server;
+ struct list_head sub_list;
+ spinlock_t sub_lock; /* for subscription list */
+ struct work_struct rwork;
+ struct list_head outqueue;
+ spinlock_t outqueue_lock; /* for outqueue */
+ struct work_struct swork;
+};
+
+/* An entry waiting to be sent */
+struct outqueue_entry {
+ bool inactive;
+ struct tipc_event evt;
+ struct list_head list;
+};
+
+static void tipc_conn_recv_work(struct work_struct *work);
+static void tipc_conn_send_work(struct work_struct *work);
+static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt);
+static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s);
+
+static bool connected(struct tipc_conn *con)
+{
+ return con && test_bit(CF_CONNECTED, &con->flags);
+}
+
+static void tipc_conn_kref_release(struct kref *kref)
+{
+ struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
+ struct tipc_topsrv *s = con->server;
+ struct outqueue_entry *e, *safe;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
+ if (con->sock)
+ sock_release(con->sock);
+
+ spin_lock_bh(&con->outqueue_lock);
+ list_for_each_entry_safe(e, safe, &con->outqueue, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+ kfree(con);
+}
+
+static void conn_put(struct tipc_conn *con)
+{
+ kref_put(&con->kref, tipc_conn_kref_release);
+}
+
+static void conn_get(struct tipc_conn *con)
+{
+ kref_get(&con->kref);
+}
+
+static void tipc_conn_close(struct tipc_conn *con)
+{
+ struct sock *sk = con->sock->sk;
+ bool disconnect = false;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ disconnect = test_and_clear_bit(CF_CONNECTED, &con->flags);
+
+ if (disconnect) {
+ sk->sk_user_data = NULL;
+ tipc_conn_delete_sub(con, NULL);
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ /* Handle concurrent calls from sending and receiving threads */
+ if (!disconnect)
+ return;
+
+	/* Don't flush pending works; just let them expire */
+ kernel_sock_shutdown(con->sock, SHUT_RDWR);
+
+ conn_put(con);
+}
+
+static struct tipc_conn *tipc_conn_alloc(struct tipc_topsrv *s, struct socket *sock)
+{
+ struct tipc_conn *con;
+ int ret;
+
+ con = kzalloc(sizeof(*con), GFP_ATOMIC);
+ if (!con)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&con->kref);
+ INIT_LIST_HEAD(&con->outqueue);
+ INIT_LIST_HEAD(&con->sub_list);
+ spin_lock_init(&con->outqueue_lock);
+ spin_lock_init(&con->sub_lock);
+ INIT_WORK(&con->swork, tipc_conn_send_work);
+ INIT_WORK(&con->rwork, tipc_conn_recv_work);
+
+ spin_lock_bh(&s->idr_lock);
+ ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
+ if (ret < 0) {
+ kfree(con);
+ spin_unlock_bh(&s->idr_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+ con->conid = ret;
+ s->idr_in_use++;
+
+ set_bit(CF_CONNECTED, &con->flags);
+ con->server = s;
+ con->sock = sock;
+ conn_get(con);
+ spin_unlock_bh(&s->idr_lock);
+
+ return con;
+}
+
+static struct tipc_conn *tipc_conn_lookup(struct tipc_topsrv *s, int conid)
+{
+ struct tipc_conn *con;
+
+ spin_lock_bh(&s->idr_lock);
+ con = idr_find(&s->conn_idr, conid);
+ if (!connected(con) || !kref_get_unless_zero(&con->kref))
+ con = NULL;
+ spin_unlock_bh(&s->idr_lock);
+ return con;
+}
+
+/* tipc_conn_delete_sub - delete a specific or all subscriptions
+ * for a given subscriber
+ */
+static void tipc_conn_delete_sub(struct tipc_conn *con, struct tipc_subscr *s)
+{
+ struct tipc_net *tn = tipc_net(con->server->net);
+ struct list_head *sub_list = &con->sub_list;
+ struct tipc_subscription *sub, *tmp;
+
+ spin_lock_bh(&con->sub_lock);
+ list_for_each_entry_safe(sub, tmp, sub_list, sub_list) {
+ if (!s || !memcmp(s, &sub->evt.s, sizeof(*s))) {
+ tipc_sub_unsubscribe(sub);
+ atomic_dec(&tn->subscription_count);
+ if (s)
+ break;
+ }
+ }
+ spin_unlock_bh(&con->sub_lock);
+}
+
+static void tipc_conn_send_to_sock(struct tipc_conn *con)
+{
+ struct list_head *queue = &con->outqueue;
+ struct tipc_topsrv *srv = con->server;
+ struct outqueue_entry *e;
+ struct tipc_event *evt;
+ struct msghdr msg;
+ struct kvec iov;
+ int count = 0;
+ int ret;
+
+ spin_lock_bh(&con->outqueue_lock);
+
+ while (!list_empty(queue)) {
+ e = list_first_entry(queue, struct outqueue_entry, list);
+ evt = &e->evt;
+ spin_unlock_bh(&con->outqueue_lock);
+
+ if (e->inactive)
+ tipc_conn_delete_sub(con, &evt->s);
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_flags = MSG_DONTWAIT;
+ iov.iov_base = evt;
+ iov.iov_len = sizeof(*evt);
+ msg.msg_name = NULL;
+
+ if (con->sock) {
+ ret = kernel_sendmsg(con->sock, &msg, &iov,
+ 1, sizeof(*evt));
+ if (ret == -EWOULDBLOCK || ret == 0) {
+ cond_resched();
+ return;
+ } else if (ret < 0) {
+ return tipc_conn_close(con);
+ }
+ } else {
+ tipc_topsrv_kern_evt(srv->net, evt);
+ }
+
+ /* Don't starve users filling buffers */
+ if (++count >= MAX_SEND_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+ spin_lock_bh(&con->outqueue_lock);
+ list_del(&e->list);
+ kfree(e);
+ }
+ spin_unlock_bh(&con->outqueue_lock);
+}
+
+static void tipc_conn_send_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, swork);
+
+ if (connected(con))
+ tipc_conn_send_to_sock(con);
+
+ conn_put(con);
+}
+
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
+ */
+void tipc_topsrv_queue_evt(struct net *net, int conid,
+ u32 event, struct tipc_event *evt)
+{
+ struct tipc_topsrv *srv = tipc_topsrv(net);
+ struct outqueue_entry *e;
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(srv, conid);
+ if (!con)
+ return;
+
+ if (!connected(con))
+ goto err;
+
+ e = kmalloc(sizeof(*e), GFP_ATOMIC);
+ if (!e)
+ goto err;
+ e->inactive = (event == TIPC_SUBSCR_TIMEOUT);
+ memcpy(&e->evt, evt, sizeof(*evt));
+ spin_lock_bh(&con->outqueue_lock);
+ list_add_tail(&e->list, &con->outqueue);
+ spin_unlock_bh(&con->outqueue_lock);
+
+ if (queue_work(srv->send_wq, &con->swork))
+ return;
+err:
+ conn_put(con);
+}
+
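+/* Call-path sketch (illustrative): a subscription timeout would queue a
+ * final event roughly as below; tipc_conn_send_to_sock() then treats the
+ * entry as 'inactive' and deletes the subscription after sending it.
+ * 'sub->net', 'sub->conid' and 'sub->evt' are assumed names for the
+ * subscription's namespace, connection id and prefilled event record:
+ *
+ *	tipc_topsrv_queue_evt(sub->net, sub->conid, TIPC_SUBSCR_TIMEOUT,
+ *			      &sub->evt);
+ */
+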
+/* tipc_conn_write_space - interrupt callback after a sendmsg EAGAIN
+ * Indicates that there now is more space in the send buffer
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
+ */
+static void tipc_conn_write_space(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sk->sk_user_data;
+ if (connected(con)) {
+ conn_get(con);
+ if (!queue_work(con->server->send_wq, &con->swork))
+ conn_put(con);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
+ struct tipc_conn *con,
+ struct tipc_subscr *s)
+{
+ struct tipc_net *tn = tipc_net(srv->net);
+ struct tipc_subscription *sub;
+ u32 s_filter = tipc_sub_read(s, filter);
+
+ if (s_filter & TIPC_SUB_CANCEL) {
+ tipc_sub_write(s, filter, s_filter & ~TIPC_SUB_CANCEL);
+ tipc_conn_delete_sub(con, s);
+ return 0;
+ }
+ if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCR) {
+ pr_warn("Subscription rejected, max (%u)\n", TIPC_MAX_SUBSCR);
+ return -1;
+ }
+ sub = tipc_sub_subscribe(srv->net, s, con->conid);
+ if (!sub)
+ return -1;
+ atomic_inc(&tn->subscription_count);
+ spin_lock_bh(&con->sub_lock);
+ list_add(&sub->sub_list, &con->sub_list);
+ spin_unlock_bh(&con->sub_lock);
+ return 0;
+}
+
+static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+{
+ struct tipc_topsrv *srv = con->server;
+ struct sock *sk = con->sock->sk;
+ struct msghdr msg = {};
+ struct tipc_subscr s;
+ struct kvec iov;
+ int ret;
+
+ iov.iov_base = &s;
+ iov.iov_len = sizeof(s);
+ msg.msg_name = NULL;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, iov.iov_len);
+ ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
+ if (ret == -EWOULDBLOCK)
+ return -EWOULDBLOCK;
+ if (ret == sizeof(s)) {
+ read_lock_bh(&sk->sk_callback_lock);
+ /* RACE: the connection can be closed in the meantime */
+ if (likely(connected(con)))
+ ret = tipc_conn_rcv_sub(srv, con, &s);
+ read_unlock_bh(&sk->sk_callback_lock);
+ if (!ret)
+ return 0;
+ }
+
+ tipc_conn_close(con);
+ return ret;
+}
+
+static void tipc_conn_recv_work(struct work_struct *work)
+{
+ struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
+ int count = 0;
+
+ while (connected(con)) {
+ if (tipc_conn_rcv_from_sock(con))
+ break;
+
+ /* Don't flood Rx machine */
+ if (++count >= MAX_RECV_MSG_COUNT) {
+ cond_resched();
+ count = 0;
+ }
+ }
+ conn_put(con);
+}
+
+/* tipc_conn_data_ready - interrupt callback indicating the socket has data
+ * The queued work is launched into tipc_conn_recv_work()->tipc_conn_rcv_from_sock()
+ */
+static void tipc_conn_data_ready(struct sock *sk)
+{
+ struct tipc_conn *con;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ con = sk->sk_user_data;
+ if (connected(con)) {
+ conn_get(con);
+ if (!queue_work(con->server->rcv_wq, &con->rwork))
+ conn_put(con);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void tipc_topsrv_accept(struct work_struct *work)
+{
+ struct tipc_topsrv *srv = container_of(work, struct tipc_topsrv, awork);
+ struct socket *newsock, *lsock;
+ struct tipc_conn *con;
+ struct sock *newsk;
+ int ret;
+
+ spin_lock_bh(&srv->idr_lock);
+ if (!srv->listener) {
+ spin_unlock_bh(&srv->idr_lock);
+ return;
+ }
+ lsock = srv->listener;
+ spin_unlock_bh(&srv->idr_lock);
+
+ while (1) {
+ ret = kernel_accept(lsock, &newsock, O_NONBLOCK);
+ if (ret < 0)
+ return;
+ con = tipc_conn_alloc(srv, newsock);
+ if (IS_ERR(con)) {
+ ret = PTR_ERR(con);
+ sock_release(newsock);
+ return;
+ }
+ /* Register callbacks */
+ newsk = newsock->sk;
+ write_lock_bh(&newsk->sk_callback_lock);
+ newsk->sk_data_ready = tipc_conn_data_ready;
+ newsk->sk_write_space = tipc_conn_write_space;
+ newsk->sk_user_data = con;
+ write_unlock_bh(&newsk->sk_callback_lock);
+
+ /* Wake up receive process in case of 'SYN+' message */
+ newsk->sk_data_ready(newsk);
+ conn_put(con);
+ }
+}
+
+/* tipc_topsrv_listener_data_ready - interrupt callback with connection request
+ * The queued job is launched into tipc_topsrv_accept()
+ */
+static void tipc_topsrv_listener_data_ready(struct sock *sk)
+{
+ struct tipc_topsrv *srv;
+
+ trace_sk_data_ready(sk);
+
+ read_lock_bh(&sk->sk_callback_lock);
+ srv = sk->sk_user_data;
+ if (srv)
+ queue_work(srv->rcv_wq, &srv->awork);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int tipc_topsrv_create_listener(struct tipc_topsrv *srv)
+{
+ struct socket *lsock = NULL;
+ struct sockaddr_tipc saddr;
+ struct sock *sk;
+ int rc;
+
+ rc = sock_create_kern(srv->net, AF_TIPC, SOCK_SEQPACKET, 0, &lsock);
+ if (rc < 0)
+ return rc;
+
+ srv->listener = lsock;
+ sk = lsock->sk;
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_data_ready = tipc_topsrv_listener_data_ready;
+ sk->sk_user_data = srv;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ lock_sock(sk);
+ rc = tsk_set_importance(sk, TIPC_CRITICAL_IMPORTANCE);
+ release_sock(sk);
+ if (rc < 0)
+ goto err;
+
+ saddr.family = AF_TIPC;
+ saddr.addrtype = TIPC_SERVICE_RANGE;
+ saddr.addr.nameseq.type = TIPC_TOP_SRV;
+ saddr.addr.nameseq.lower = TIPC_TOP_SRV;
+ saddr.addr.nameseq.upper = TIPC_TOP_SRV;
+ saddr.scope = TIPC_NODE_SCOPE;
+
+ rc = tipc_sk_bind(lsock, (struct sockaddr *)&saddr, sizeof(saddr));
+ if (rc < 0)
+ goto err;
+ rc = kernel_listen(lsock, 0);
+ if (rc < 0)
+ goto err;
+
+	/* As the listening socket's owner and creator is the TIPC module
+	 * itself, we must decrease the TIPC module reference count to keep
+	 * it at zero after the server socket is created; otherwise the
+	 * "rmmod" command cannot remove the TIPC module once it has been
+	 * inserted.
+	 *
+	 * sock_create_kern() increments the reference count twice: once for
+	 * the owner of the TIPC socket's proto_ops struct, and once for the
+	 * owner of the TIPC proto struct. Therefore we must decrement the
+	 * module reference count twice to keep it at zero after the
+	 * listening socket is created. Correspondingly, we must bump it
+	 * twice again before the socket is closed.
+	 */
+ module_put(lsock->ops->owner);
+ module_put(sk->sk_prot_creator->owner);
+
+ return 0;
+err:
+ sock_release(lsock);
+ return -EINVAL;
+}
+
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid)
+{
+ struct tipc_subscr sub;
+ struct tipc_conn *con;
+ int rc;
+
+ sub.seq.type = type;
+ sub.seq.lower = lower;
+ sub.seq.upper = upper;
+ sub.timeout = TIPC_WAIT_FOREVER;
+ sub.filter = filter;
+ *(u64 *)&sub.usr_handle = (u64)port;
+
+ con = tipc_conn_alloc(tipc_topsrv(net), NULL);
+ if (IS_ERR(con))
+ return false;
+
+ *conid = con->conid;
+ rc = tipc_conn_rcv_sub(tipc_topsrv(net), con, &sub);
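+	/* tipc_conn_alloc() returned with two references held: on failure
+	 * drop both so the connection is freed; on success keep one for
+	 * the lifetime of the conid.
+	 */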
+ if (rc)
+ conn_put(con);
+
+ conn_put(con);
+ return !rc;
+}
+
+void tipc_topsrv_kern_unsubscr(struct net *net, int conid)
+{
+ struct tipc_conn *con;
+
+ con = tipc_conn_lookup(tipc_topsrv(net), conid);
+ if (!con)
+ return;
+
+ test_and_clear_bit(CF_CONNECTED, &con->flags);
+ tipc_conn_delete_sub(con, NULL);
+ conn_put(con);
+ conn_put(con);
+}
+
+static void tipc_topsrv_kern_evt(struct net *net, struct tipc_event *evt)
+{
+ u32 port = *(u32 *)&evt->s.usr_handle;
+ u32 self = tipc_own_addr(net);
+ struct sk_buff_head evtq;
+ struct sk_buff *skb;
+
+ skb = tipc_msg_create(TOP_SRV, 0, INT_H_SIZE, sizeof(*evt),
+ self, self, port, port, 0);
+ if (!skb)
+ return;
+ msg_set_dest_droppable(buf_msg(skb), true);
+ memcpy(msg_data(buf_msg(skb)), evt, sizeof(*evt));
+ skb_queue_head_init(&evtq);
+ __skb_queue_tail(&evtq, skb);
+ tipc_loopback_trace(net, &evtq);
+ tipc_sk_rcv(net, &evtq);
+}
+
+static int tipc_topsrv_work_start(struct tipc_topsrv *s)
+{
+ s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
+ if (!s->rcv_wq) {
+ pr_err("can't start tipc receive workqueue\n");
+ return -ENOMEM;
+ }
+
+ s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
+ if (!s->send_wq) {
+ pr_err("can't start tipc send workqueue\n");
+ destroy_workqueue(s->rcv_wq);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void tipc_topsrv_work_stop(struct tipc_topsrv *s)
+{
+ destroy_workqueue(s->rcv_wq);
+ destroy_workqueue(s->send_wq);
+}
+
+static int tipc_topsrv_start(struct net *net)
+{
+ struct tipc_net *tn = tipc_net(net);
+ const char name[] = "topology_server";
+ struct tipc_topsrv *srv;
+ int ret;
+
+ srv = kzalloc(sizeof(*srv), GFP_ATOMIC);
+ if (!srv)
+ return -ENOMEM;
+
+ srv->net = net;
+ INIT_WORK(&srv->awork, tipc_topsrv_accept);
+
+ strscpy(srv->name, name, sizeof(srv->name));
+ tn->topsrv = srv;
+ atomic_set(&tn->subscription_count, 0);
+
+ spin_lock_init(&srv->idr_lock);
+ idr_init(&srv->conn_idr);
+ srv->idr_in_use = 0;
+
+ ret = tipc_topsrv_work_start(srv);
+ if (ret < 0)
+ goto err_start;
+
+ ret = tipc_topsrv_create_listener(srv);
+ if (ret < 0)
+ goto err_create;
+
+ return 0;
+
+err_create:
+ tipc_topsrv_work_stop(srv);
+err_start:
+ kfree(srv);
+ return ret;
+}
+
+static void tipc_topsrv_stop(struct net *net)
+{
+ struct tipc_topsrv *srv = tipc_topsrv(net);
+ struct socket *lsock = srv->listener;
+ struct tipc_conn *con;
+ int id;
+
+ spin_lock_bh(&srv->idr_lock);
+ for (id = 0; srv->idr_in_use; id++) {
+ con = idr_find(&srv->conn_idr, id);
+ if (con) {
+ conn_get(con);
+ spin_unlock_bh(&srv->idr_lock);
+ tipc_conn_close(con);
+ conn_put(con);
+ spin_lock_bh(&srv->idr_lock);
+ }
+ }
+ __module_get(lsock->ops->owner);
+ __module_get(lsock->sk->sk_prot_creator->owner);
+ srv->listener = NULL;
+ spin_unlock_bh(&srv->idr_lock);
+
+ tipc_topsrv_work_stop(srv);
+ sock_release(lsock);
+ idr_destroy(&srv->conn_idr);
+ kfree(srv);
+}
+
+int __net_init tipc_topsrv_init_net(struct net *net)
+{
+ return tipc_topsrv_start(net);
+}
+
+void __net_exit tipc_topsrv_exit_net(struct net *net)
+{
+ tipc_topsrv_stop(net);
+}
diff --git a/net/tipc/server.h b/net/tipc/topsrv.h
index 34f8055afa3b..c7ea71293748 100644
--- a/net/tipc/server.h
+++ b/net/tipc/topsrv.h
@@ -2,6 +2,7 @@
* net/tipc/server.h: Include file for TIPC server code
*
* Copyright (c) 2012-2013, Wind River Systems
+ * Copyright (c) 2017, Ericsson AB
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,62 +37,18 @@
#ifndef _TIPC_SERVER_H
#define _TIPC_SERVER_H
-#include <linux/idr.h>
-#include <linux/tipc.h>
-#include <net/net_namespace.h>
+#include "core.h"
#define TIPC_SERVER_NAME_LEN 32
+#define TIPC_SUB_CLUSTER_SCOPE 0x20
+#define TIPC_SUB_NODE_SCOPE 0x40
+#define TIPC_SUB_NO_STATUS 0x80
-/**
- * struct tipc_server - TIPC server structure
- * @conn_idr: identifier set of connection
- * @idr_lock: protect the connection identifier set
- * @idr_in_use: amount of allocated identifier entry
- * @net: network namspace instance
- * @rcvbuf_cache: memory cache of server receive buffer
- * @rcv_wq: receive workqueue
- * @send_wq: send workqueue
- * @max_rcvbuf_size: maximum permitted receive message length
- * @tipc_conn_new: callback will be called when new connection is incoming
- * @tipc_conn_release: callback will be called before releasing the connection
- * @tipc_conn_recvmsg: callback will be called when message arrives
- * @saddr: TIPC server address
- * @name: server name
- * @imp: message importance
- * @type: socket type
- */
-struct tipc_server {
- struct idr conn_idr;
- spinlock_t idr_lock;
- int idr_in_use;
- struct net *net;
- struct kmem_cache *rcvbuf_cache;
- struct workqueue_struct *rcv_wq;
- struct workqueue_struct *send_wq;
- int max_rcvbuf_size;
- void *(*tipc_conn_new)(int conid);
- void (*tipc_conn_release)(int conid, void *usr_data);
- void (*tipc_conn_recvmsg)(struct net *net, int conid,
- struct sockaddr_tipc *addr, void *usr_data,
- void *buf, size_t len);
- struct sockaddr_tipc *saddr;
- char name[TIPC_SERVER_NAME_LEN];
- int imp;
- int type;
-};
-
-int tipc_conn_sendmsg(struct tipc_server *s, int conid,
- struct sockaddr_tipc *addr, void *data, size_t len);
-
-/**
- * tipc_conn_terminate - terminate connection with server
- *
- * Note: Must call it in process context since it might sleep
- */
-void tipc_conn_terminate(struct tipc_server *s, int conid);
-
-int tipc_server_start(struct tipc_server *s);
+void tipc_topsrv_queue_evt(struct net *net, int conid,
+ u32 event, struct tipc_event *evt);
-void tipc_server_stop(struct tipc_server *s);
+bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ u32 upper, u32 filter, int *conid);
+void tipc_topsrv_kern_unsubscr(struct net *net, int conid);
#endif
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
new file mode 100644
index 000000000000..7d2931521e0e
--- /dev/null
+++ b/net/tipc/trace.c
@@ -0,0 +1,206 @@
+/*
+ * net/tipc/trace.c: TIPC tracepoints code
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+/*
+ * socket tuples for filtering in socket traces:
+ * (portid, sock type, name type, name lower, name upper)
+ */
+unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
+
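+/* Usage sketch (illustrative): assuming the array is exposed as the
+ * net.tipc.sk_filter sysctl, socket tracing can be narrowed to a single
+ * tuple from the shell, e.g.:
+ *
+ *	echo "3301 2 0 0 0" > /proc/sys/net/tipc/sk_filter
+ */
+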
+/**
+ * tipc_skb_dump - dump TIPC skb data
+ * @skb: skb to be dumped
+ * @more: dump more?
+ * - false: dump only tipc msg data
+ * - true: dump kernel-related skb data and tipc cb[] array as well
+ * @buf: returned buffer of dump data in format
+ */
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? SKB_LMAX : SKB_LMIN;
+ struct tipc_msg *hdr;
+ struct tipc_skb_cb *skbcb;
+
+ if (!skb) {
+ i += scnprintf(buf, sz, "msg: (null)\n");
+ return i;
+ }
+
+ hdr = buf_msg(skb);
+ skbcb = TIPC_SKB_CB(skb);
+
+ /* tipc msg data section */
+ i += scnprintf(buf, sz, "msg: %u", msg_user(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_type(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_hdr_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_data_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_orignode(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_destnode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seqno(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_ack(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bcast_ack(hdr));
+ switch (msg_user(hdr)) {
+ case LINK_PROTOCOL:
+ i += scnprintf(buf + i, sz - i, " %c", msg_net_plane(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_probe(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_peer_stopping(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_session(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_next_sent(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seq_gap(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_snd_nxt(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_gap(hdr));
+ break;
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ case CONN_MANAGER:
+ case SOCK_WAKEUP:
+ i += scnprintf(buf + i, sz - i, " | %u", msg_origport(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_destport(hdr));
+ switch (msg_type(hdr)) {
+ case TIPC_NAMED_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameinst(hdr));
+ break;
+ case TIPC_MCAST_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_namelower(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameupper(hdr));
+ break;
+ default:
+ break;
+ }
+ i += scnprintf(buf + i, sz - i, " | %u",
+ msg_src_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_dest_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_errcode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_reroute_cnt(hdr));
+ break;
+ default:
+ /* need more? */
+ break;
+ }
+
+ i += scnprintf(buf + i, sz - i, "\n");
+ if (!more)
+ return i;
+
+ /* kernel-related skb data section */
+ i += scnprintf(buf + i, sz - i, "skb: %s",
+ (skb->dev) ? skb->dev->name : "n/a");
+ i += scnprintf(buf + i, sz - i, " %u", skb->len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
+ i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
+ i += scnprintf(buf + i, sz - i, " %p", skb->sk);
+ i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
+ i += scnprintf(buf + i, sz - i, " %llx",
+ ktime_to_ms(skb_get_ktime(skb)));
+ i += scnprintf(buf + i, sz - i, " %llx\n",
+ ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
+
+ /* tipc skb cb[] data section */
+ i += scnprintf(buf + i, sz - i, "cb[]: %u", skbcb->bytes_read);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->orig_member);
+ i += scnprintf(buf + i, sz - i, " %u",
+ jiffies_to_msecs(skbcb->nxt_retr));
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->validated);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->chain_imp);
+ i += scnprintf(buf + i, sz - i, " %u\n", skbcb->ackers);
+
+ return i;
+}
+
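+/* Usage sketch (illustrative): callers supply a buffer sized for the dump
+ * they request; with more=true at least SKB_LMAX bytes are assumed:
+ *
+ *	char buf[SKB_LMAX];
+ *
+ *	tipc_skb_dump(skb, true, buf);
+ *	pr_info("%s", buf);
+ */
+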
+/**
+ * tipc_list_dump - dump TIPC skb list/queue
+ * @list: list of skbs to be dumped
+ * @more: dump more?
+ * - false: dump only the head & tail skbs
+ * - true: dump the first & last 5 skbs
+ * @buf: returned buffer of dump data in format
+ */
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? LIST_LMAX : LIST_LMIN;
+ u32 count, len;
+ struct sk_buff *hskb, *tskb, *skb, *tmp;
+
+ if (!list) {
+ i += scnprintf(buf, sz, "(null)\n");
+ return i;
+ }
+
+ len = skb_queue_len(list);
+ i += scnprintf(buf, sz, "len = %d\n", len);
+
+ if (!len)
+ return i;
+
+ if (!more) {
+ hskb = skb_peek(list);
+ i += scnprintf(buf + i, sz - i, " head ");
+ i += tipc_skb_dump(hskb, false, buf + i);
+ if (len > 1) {
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(tskb, false, buf + i);
+ }
+ } else {
+ count = 0;
+ skb_queue_walk_safe(list, skb, tmp) {
+ count++;
+ if (count == 6)
+ i += scnprintf(buf + i, sz - i, " .\n .\n");
+ if (count > 5 && count <= len - 5)
+ continue;
+ i += scnprintf(buf + i, sz - i, " #%d ", count);
+ i += tipc_skb_dump(skb, false, buf + i);
+ }
+ }
+ return i;
+}
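+
+/* Usage sketch (illustrative): dumping only the head and tail skbs of a
+ * queue, assuming 'list' points at a populated sk_buff_head; more=false
+ * needs only a LIST_LMIN-sized buffer:
+ *
+ *	char buf[LIST_LMIN];
+ *
+ *	tipc_list_dump(list, false, buf);
+ *	pr_info("%s", buf);
+ */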
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
new file mode 100644
index 000000000000..865142ed0ab4
--- /dev/null
+++ b/net/tipc/trace.h
@@ -0,0 +1,434 @@
+/*
+ * net/tipc/trace.h: TIPC tracepoints
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tipc
+
+#if !defined(_TIPC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TIPC_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "core.h"
+#include "link.h"
+#include "socket.h"
+#include "node.h"
+
+#define SKB_LMIN (100)
+#define SKB_LMAX (SKB_LMIN * 2)
+#define LIST_LMIN (SKB_LMIN * 3)
+#define LIST_LMAX (SKB_LMIN * 11)
+#define SK_LMIN (SKB_LMIN * 2)
+#define SK_LMAX (SKB_LMIN * 11)
+#define LINK_LMIN (SKB_LMIN)
+#define LINK_LMAX (SKB_LMIN * 16)
+#define NODE_LMIN (SKB_LMIN)
+#define NODE_LMAX (SKB_LMIN * 11)
+
+#ifndef __TIPC_TRACE_ENUM
+#define __TIPC_TRACE_ENUM
+enum {
+ TIPC_DUMP_NONE = 0,
+
+ TIPC_DUMP_TRANSMQ = 1,
+ TIPC_DUMP_BACKLOGQ = (1 << 1),
+ TIPC_DUMP_DEFERDQ = (1 << 2),
+ TIPC_DUMP_INPUTQ = (1 << 3),
+ TIPC_DUMP_WAKEUP = (1 << 4),
+
+ TIPC_DUMP_SK_SNDQ = (1 << 8),
+ TIPC_DUMP_SK_RCVQ = (1 << 9),
+ TIPC_DUMP_SK_BKLGQ = (1 << 10),
+ TIPC_DUMP_ALL = 0xffffu
+};
+#endif
+
+/* Link & Node FSM states: */
+#define state_sym(val) \
+ __print_symbolic(val, \
+ {(0xe), "ESTABLISHED" },\
+ {(0xe << 4), "ESTABLISHING" },\
+ {(0x1 << 8), "RESET" },\
+ {(0x2 << 12), "RESETTING" },\
+ {(0xd << 16), "PEER_RESET" },\
+ {(0xf << 20), "FAILINGOVER" },\
+ {(0xc << 24), "SYNCHING" },\
+ {(0xdd), "SELF_DOWN_PEER_DOWN" },\
+ {(0xaa), "SELF_UP_PEER_UP" },\
+ {(0xd1), "SELF_DOWN_PEER_LEAVING" },\
+ {(0xac), "SELF_UP_PEER_COMING" },\
+ {(0xca), "SELF_COMING_PEER_UP" },\
+ {(0x1d), "SELF_LEAVING_PEER_DOWN" },\
+ {(0xf0), "FAILINGOVER" },\
+ {(0xcc), "SYNCHING" })
+
+/* Link & Node FSM events: */
+#define evt_sym(val) \
+ __print_symbolic(val, \
+ {(0xec1ab1e), "ESTABLISH_EVT" },\
+ {(0x9eed0e), "PEER_RESET_EVT" },\
+ {(0xfa110e), "FAILURE_EVT" },\
+ {(0x10ca1d0e), "RESET_EVT" },\
+ {(0xfa110bee), "FAILOVER_BEGIN_EVT" },\
+ {(0xfa110ede), "FAILOVER_END_EVT" },\
+ {(0xc1ccbee), "SYNCH_BEGIN_EVT" },\
+ {(0xc1ccede), "SYNCH_END_EVT" },\
+ {(0xece), "SELF_ESTABL_CONTACT_EVT" },\
+ {(0x1ce), "SELF_LOST_CONTACT_EVT" },\
+ {(0x9ece), "PEER_ESTABL_CONTACT_EVT" },\
+ {(0x91ce), "PEER_LOST_CONTACT_EVT" },\
+ {(0xfbe), "FAILOVER_BEGIN_EVT" },\
+ {(0xfee), "FAILOVER_END_EVT" },\
+ {(0xcbe), "SYNCH_BEGIN_EVT" },\
+ {(0xcee), "SYNCH_END_EVT" })
+
+/* Bearer, net device events: */
+#define dev_evt_sym(val) \
+ __print_symbolic(val, \
+ {(NETDEV_CHANGE), "NETDEV_CHANGE" },\
+ {(NETDEV_GOING_DOWN), "NETDEV_GOING_DOWN" },\
+ {(NETDEV_UP), "NETDEV_UP" },\
+ {(NETDEV_CHANGEMTU), "NETDEV_CHANGEMTU" },\
+ {(NETDEV_CHANGEADDR), "NETDEV_CHANGEADDR" },\
+ {(NETDEV_UNREGISTER), "NETDEV_UNREGISTER" },\
+ {(NETDEV_CHANGENAME), "NETDEV_CHANGENAME" })
+
+extern unsigned long sysctl_tipc_sk_filter[5] __read_mostly;
+
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf);
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf);
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf);
+bool tipc_sk_filtering(struct sock *sk);
+
+DECLARE_EVENT_CLASS(tipc_skb_class,
+
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header),
+
+ TP_ARGS(skb, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? SKB_LMAX : SKB_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header);
+ tipc_skb_dump(skb, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+)
+
+#define DEFINE_SKB_EVENT(name) \
+DEFINE_EVENT(tipc_skb_class, name, \
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header), \
+ TP_ARGS(skb, more, header))
+DEFINE_SKB_EVENT(tipc_skb_dump);
+DEFINE_SKB_EVENT(tipc_proto_build);
+DEFINE_SKB_EVENT(tipc_proto_rcv);
+
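+/* Call sketch (illustrative): each DEFINE_*_EVENT() generates a matching
+ * trace_<name>() helper, so protocol code can emit for example:
+ *
+ *	trace_tipc_proto_rcv(skb, false, "incoming proto msg");
+ */
+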
+DECLARE_EVENT_CLASS(tipc_list_class,
+
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header),
+
+ TP_ARGS(list, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? LIST_LMAX : LIST_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header);
+ tipc_list_dump(list, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+);
+
+#define DEFINE_LIST_EVENT(name) \
+DEFINE_EVENT(tipc_list_class, name, \
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header), \
+ TP_ARGS(list, more, header))
+DEFINE_LIST_EVENT(tipc_list_dump);
+
+DECLARE_EVENT_CLASS(tipc_sk_class,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
+ const char *header),
+
+ TP_ARGS(sk, skb, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, portid)
+ __dynamic_array(char, buf, (dqueues) ? SK_LMAX : SK_LMIN)
+ __dynamic_array(char, skb_buf, (skb) ? SKB_LMIN : 1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header);
+ __entry->portid = tipc_sock_get_portid(sk);
+ tipc_sk_dump(sk, dqueues, __get_str(buf));
+ if (skb)
+ tipc_skb_dump(skb, false, __get_str(skb_buf));
+ else
+ *(__get_str(skb_buf)) = '\0';
+ ),
+
+ TP_printk("<%u> %s\n%s%s", __entry->portid, __get_str(header),
+ __get_str(skb_buf), __get_str(buf))
+);
+
+#define DEFINE_SK_EVENT_FILTER(name) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk)))
+DEFINE_SK_EVENT_FILTER(tipc_sk_dump);
+DEFINE_SK_EVENT_FILTER(tipc_sk_create);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmcast);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmsg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendstream);
+DEFINE_SK_EVENT_FILTER(tipc_sk_poll);
+DEFINE_SK_EVENT_FILTER(tipc_sk_filter_rcv);
+DEFINE_SK_EVENT_FILTER(tipc_sk_advance_rx);
+DEFINE_SK_EVENT_FILTER(tipc_sk_rej_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_drop_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_release);
+DEFINE_SK_EVENT_FILTER(tipc_sk_shutdown);
+
+#define DEFINE_SK_EVENT_FILTER_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
+
+DECLARE_EVENT_CLASS(tipc_link_class,
+
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header),
+
+ TP_ARGS(l, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __dynamic_array(char, buf, (dqueues) ? LINK_LMAX : LINK_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header);
+ memcpy(__entry->name, tipc_link_name(l), TIPC_MAX_LINK_NAME);
+ tipc_link_dump(l, dqueues, __get_str(buf));
+ ),
+
+ TP_printk("<%s> %s\n%s", __entry->name, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_LINK_EVENT(name) \
+DEFINE_EVENT(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header))
+DEFINE_LINK_EVENT(tipc_link_dump);
+DEFINE_LINK_EVENT(tipc_link_conges);
+DEFINE_LINK_EVENT(tipc_link_timeout);
+DEFINE_LINK_EVENT(tipc_link_reset);
+
+#define DEFINE_LINK_EVENT_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header), \
+ TP_CONDITION(cond))
+DEFINE_LINK_EVENT_COND(tipc_link_too_silent, tipc_link_too_silent(l));
+
+DECLARE_EVENT_CLASS(tipc_link_transmq_class,
+
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+
+ TP_ARGS(r, f, t, tq),
+
+ TP_STRUCT__entry(
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __field(u16, from)
+ __field(u16, to)
+ __field(u32, len)
+ __field(u16, fseqno)
+ __field(u16, lseqno)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->name, tipc_link_name(r), TIPC_MAX_LINK_NAME);
+ __entry->from = f;
+ __entry->to = t;
+ __entry->len = skb_queue_len(tq);
+ __entry->fseqno = __entry->len ?
+ msg_seqno(buf_msg(skb_peek(tq))) : 0;
+ __entry->lseqno = __entry->len ?
+ msg_seqno(buf_msg(skb_peek_tail(tq))) : 0;
+ ),
+
+ TP_printk("<%s> retrans req: [%u-%u] transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DEFINE_EVENT_CONDITION(tipc_link_transmq_class, tipc_link_retrans,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq),
+ TP_CONDITION(less_eq(f, t))
+);
+
+DEFINE_EVENT_PRINT(tipc_link_transmq_class, tipc_link_bc_ack,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq),
+ TP_printk("<%s> acked: %u gap: %u transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DECLARE_EVENT_CLASS(tipc_node_class,
+
+ TP_PROTO(struct tipc_node *n, bool more, const char *header),
+
+ TP_ARGS(n, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, addr)
+ __dynamic_array(char, buf, (more) ? NODE_LMAX : NODE_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header);
+ __entry->addr = tipc_node_get_addr(n);
+ tipc_node_dump(n, more, __get_str(buf));
+ ),
+
+ TP_printk("<%x> %s\n%s", __entry->addr, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_NODE_EVENT(name) \
+DEFINE_EVENT(tipc_node_class, name, \
+ TP_PROTO(struct tipc_node *n, bool more, const char *header), \
+ TP_ARGS(n, more, header))
+DEFINE_NODE_EVENT(tipc_node_dump);
+DEFINE_NODE_EVENT(tipc_node_create);
+DEFINE_NODE_EVENT(tipc_node_delete);
+DEFINE_NODE_EVENT(tipc_node_lost_contact);
+DEFINE_NODE_EVENT(tipc_node_timeout);
+DEFINE_NODE_EVENT(tipc_node_link_up);
+DEFINE_NODE_EVENT(tipc_node_link_down);
+DEFINE_NODE_EVENT(tipc_node_reset_links);
+DEFINE_NODE_EVENT(tipc_node_check_state);
+
+DECLARE_EVENT_CLASS(tipc_fsm_class,
+
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt),
+
+ TP_ARGS(name, os, ns, evt),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u32, os)
+ __field(u32, ns)
+ __field(u32, evt)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name);
+ __entry->os = os;
+ __entry->ns = ns;
+ __entry->evt = evt;
+ ),
+
+ TP_printk("<%s> %s--(%s)->%s\n", __get_str(name),
+ state_sym(__entry->os), evt_sym(__entry->evt),
+ state_sym(__entry->ns))
+);
+
+#define DEFINE_FSM_EVENT(fsm_name) \
+DEFINE_EVENT(tipc_fsm_class, fsm_name, \
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt), \
+ TP_ARGS(name, os, ns, evt))
+DEFINE_FSM_EVENT(tipc_link_fsm);
+DEFINE_FSM_EVENT(tipc_node_fsm);
+
+TRACE_EVENT(tipc_l2_device_event,
+
+ TP_PROTO(struct net_device *dev, struct tipc_bearer *b,
+ unsigned long evt),
+
+ TP_ARGS(dev, b, evt),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev->name)
+ __string(b_name, b->name)
+ __field(unsigned long, evt)
+ __field(u8, b_up)
+ __field(u8, carrier)
+ __field(u8, oper)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(b_name);
+ __entry->evt = evt;
+ __entry->b_up = test_bit(0, &b->up);
+ __entry->carrier = netif_carrier_ok(dev);
+ __entry->oper = netif_oper_up(dev);
+ ),
+
+ TP_printk("%s on: <%s>/<%s> oper: %s carrier: %s bearer: %s\n",
+ dev_evt_sym(__entry->evt), __get_str(dev_name),
+ __get_str(b_name), (__entry->oper) ? "up" : "down",
+ (__entry->carrier) ? "ok" : "notok",
+ (__entry->b_up) ? "up" : "down")
+);
+
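+/* Runtime sketch (illustrative): once built in, these events can be
+ * enabled through tracefs, e.g.:
+ *
+ *	echo 1 > /sys/kernel/tracing/events/tipc/tipc_l2_device_event/enable
+ *	cat /sys/kernel/tracing/trace_pipe
+ */
+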
+#endif /* _TIPC_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index ecca64fc6a6f..b85ab0fb3b8c 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -44,12 +44,15 @@
#include <net/sock.h>
#include <net/ip.h>
#include <net/udp_tunnel.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#include <linux/tipc_netlink.h>
#include "core.h"
+#include "addr.h"
+#include "net.h"
#include "bearer.h"
#include "netlink.h"
#include "msg.h"
+#include "udp_media.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
@@ -61,6 +64,11 @@
*
* This is the bearer level originating address used in neighbor discovery
* messages, and all fields should be in network byte order
+ *
+ * @proto: Ethernet protocol in use
+ * @port: port being used
+ * @ipv4: IPv4 address of neighbor
+ * @ipv6: IPv6 address of neighbor
*/
struct udp_media_addr {
__be16 proto;
@@ -74,6 +82,7 @@ struct udp_media_addr {
/* struct udp_replicast - container for UDP remote addresses */
struct udp_replicast {
struct udp_media_addr addr;
+ struct dst_cache dst_cache;
struct rcu_head rcu;
struct list_head list;
};
@@ -84,6 +93,7 @@ struct udp_replicast {
* @ubsock: bearer associated socket
* @ifindex: local address scope
* @work: used to schedule deferred work on a bearer
+ * @rcast: associated udp_replicast container
*/
struct udp_bearer {
struct tipc_bearer __rcu *bearer;
@@ -125,8 +135,11 @@ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size)
snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port));
else if (ntohs(ua->proto) == ETH_P_IPV6)
snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port));
- else
+ else {
pr_err("Invalid UDP media address\n");
+ return 1;
+ }
+
return 0;
}
@@ -156,52 +169,64 @@ static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a)
/* tipc_send_msg - enqueue a send request */
static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
struct udp_bearer *ub, struct udp_media_addr *src,
- struct udp_media_addr *dst)
+ struct udp_media_addr *dst, struct dst_cache *cache)
{
- int ttl, err = 0;
- struct rtable *rt;
+ struct dst_entry *ndst;
+ int ttl, err;
+ local_bh_disable();
+ ndst = dst_cache_get(cache);
if (dst->proto == htons(ETH_P_IP)) {
- struct flowi4 fl = {
- .daddr = dst->ipv4.s_addr,
- .saddr = src->ipv4.s_addr,
- .flowi4_mark = skb->mark,
- .flowi4_proto = IPPROTO_UDP
- };
- rt = ip_route_output_key(net, &fl);
- if (IS_ERR(rt)) {
- err = PTR_ERR(rt);
- goto tx_error;
+ struct rtable *rt = dst_rtable(ndst);
+
+ if (!rt) {
+ struct flowi4 fl = {
+ .daddr = dst->ipv4.s_addr,
+ .saddr = src->ipv4.s_addr,
+ .flowi4_mark = skb->mark,
+ .flowi4_proto = IPPROTO_UDP
+ };
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto tx_error;
+ }
+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
}
- skb->dev = rt->dst.dev;
ttl = ip4_dst_hoplimit(&rt->dst);
udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
dst->ipv4.s_addr, 0, ttl, 0, src->port,
- dst->port, false, true);
+ dst->port, false, true, 0);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- struct dst_entry *ndst;
- struct flowi6 fl6 = {
- .flowi6_oif = ub->ifindex,
- .daddr = dst->ipv6,
- .saddr = src->ipv6,
- .flowi6_proto = IPPROTO_UDP
- };
- err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
- &fl6);
- if (err)
- goto tx_error;
+ if (!ndst) {
+ struct flowi6 fl6 = {
+ .flowi6_oif = ub->ifindex,
+ .daddr = dst->ipv6,
+ .saddr = src->ipv6,
+ .flowi6_proto = IPPROTO_UDP
+ };
+ ndst = ipv6_stub->ipv6_dst_lookup_flow(net,
+ ub->ubsock->sk,
+ &fl6, NULL);
+ if (IS_ERR(ndst)) {
+ err = PTR_ERR(ndst);
+ goto tx_error;
+ }
+ dst_cache_set_ip6(cache, ndst, &fl6.saddr);
+ }
ttl = ip6_dst_hoplimit(ndst);
- err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
- ndst->dev, &src->ipv6,
- &dst->ipv6, 0, ttl, 0, src->port,
- dst->port, false);
+ udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+ &src->ipv6, &dst->ipv6, 0, ttl, 0,
+ src->port, dst->port, false, 0);
#endif
}
- return err;
+ local_bh_enable();
+ return 0;
tx_error:
+ local_bh_enable();
kfree_skb(skb);
return err;
}
@@ -223,14 +248,15 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
}
skb_set_inner_protocol(skb, htons(ETH_P_TIPC));
- ub = rcu_dereference_rtnl(b->media_ptr);
+ ub = rcu_dereference(b->media_ptr);
if (!ub) {
err = -ENODEV;
goto out;
}
if (addr->broadcast != TIPC_REPLICAST_SUPPORT)
- return tipc_udp_xmit(net, skb, ub, src, dst);
+ return tipc_udp_xmit(net, skb, ub, src, dst,
+ &ub->rcast.dst_cache);
/* Replicast, send an skb to each configured IP address */
list_for_each_entry_rcu(rcast, &ub->rcast.list, list) {
@@ -242,11 +268,10 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
goto out;
}
- err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
- if (err) {
- kfree_skb(_skb);
+ err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr,
+ &rcast->dst_cache);
+ if (err)
goto out;
- }
}
err = 0;
out:
@@ -288,6 +313,11 @@ static int tipc_udp_rcast_add(struct tipc_bearer *b,
if (!rcast)
return -ENOMEM;
+ if (dst_cache_init(&rcast->dst_cache, GFP_ATOMIC)) {
+ kfree(rcast);
+ return -ENOMEM;
+ }
+
memcpy(&rcast->addr, addr, sizeof(struct udp_media_addr));
if (ntohs(addr->proto) == ETH_P_IP)
@@ -354,29 +384,22 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
skb_pull(skb, sizeof(struct udphdr));
hdr = buf_msg(skb);
- rcu_read_lock();
- b = rcu_dereference_rtnl(ub->bearer);
+ b = rcu_dereference(ub->bearer);
if (!b)
- goto rcu_out;
+ goto out;
if (b && test_bit(0, &b->up)) {
+ TIPC_SKB_CB(skb)->flags = 0;
tipc_rcv(sock_net(sk), skb, b);
- rcu_read_unlock();
return 0;
}
if (unlikely(msg_user(hdr) == LINK_CONFIG)) {
err = tipc_udp_rcast_disc(b, skb);
if (err)
- goto rcu_out;
+ goto out;
}
- tipc_rcv(sock_net(sk), skb, b);
- rcu_read_unlock();
- return 0;
-
-rcu_out:
- rcu_read_unlock();
out:
kfree_skb(skb);
return 0;
@@ -394,8 +417,10 @@ static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote)
err = ip_mc_join_group(sk, &mreqn);
#if IS_ENABLED(CONFIG_IPV6)
} else {
+ lock_sock(sk);
err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex,
&remote->ipv6);
+ release_sock(sk);
#endif
}
return err;
@@ -443,21 +468,17 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
int i;
if (!bid && !skip_cnt) {
+ struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs;
struct net *net = sock_net(skb->sk);
struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
- struct nlattr **attrs;
char *bname;
- err = tipc_nlmsg_parse(cb->nlh, &attrs);
- if (err)
- return err;
-
if (!attrs[TIPC_NLA_BEARER])
return -EINVAL;
- err = nla_parse_nested(battrs, TIPC_NLA_BEARER_MAX,
- attrs[TIPC_NLA_BEARER],
- tipc_nl_bearer_policy, NULL);
+ err = nla_parse_nested_deprecated(battrs, TIPC_NLA_BEARER_MAX,
+ attrs[TIPC_NLA_BEARER],
+ tipc_nl_bearer_policy, NULL);
if (err)
return err;
@@ -468,7 +489,7 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
b = tipc_bearer_find(net, bname);
- if (!b) {
+ if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
rtnl_unlock();
return -EINVAL;
}
@@ -479,13 +500,13 @@ int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
b = rtnl_dereference(tn->bearer_list[bid]);
- if (!b) {
+ if (!b || b->bcast_addr.media_id != TIPC_MEDIA_TYPE_UDP) {
rtnl_unlock();
return -EINVAL;
}
}
- ub = rcu_dereference_rtnl(b->media_ptr);
+ ub = rtnl_dereference(b->media_ptr);
if (!ub) {
rtnl_unlock();
return -EINVAL;
@@ -527,11 +548,11 @@ int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b)
struct udp_bearer *ub;
struct nlattr *nest;
- ub = rcu_dereference_rtnl(b->media_ptr);
+ ub = rtnl_dereference(b->media_ptr);
if (!ub)
return -ENODEV;
- nest = nla_nest_start(msg->skb, TIPC_NLA_BEARER_UDP_OPTS);
+ nest = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_UDP_OPTS);
if (!nest)
goto msg_full;
@@ -556,7 +577,7 @@ msg_full:
/**
* tipc_parse_udp_addr - build udp media address from netlink data
- * @nlattr: netlink attribute containing sockaddr storage aligned address
+ * @nla: netlink attribute containing sockaddr storage aligned address
* @addr: tipc media address to fill with address, port and protocol type
* @scope_id: IPv6 scope id pointer, not NULL indicates it's required
*/
@@ -609,8 +630,7 @@ int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr)
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
struct udp_media_addr *dst;
- if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX, attr,
- tipc_nl_udp_policy, NULL))
+ if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy, NULL))
return -EINVAL;
if (!opts[TIPC_NLA_UDP_REMOTE])
@@ -651,6 +671,9 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
struct udp_port_cfg udp_conf = {0};
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct nlattr *opts[TIPC_NLA_UDP_MAX + 1];
+ u8 node_id[NODE_ID_LEN] = {0,};
+ struct net_device *dev;
+ int rmcast = 0;
ub = kzalloc(sizeof(*ub), GFP_ATOMIC);
if (!ub)
@@ -661,9 +684,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (!attrs[TIPC_NLA_BEARER_UDP_OPTS])
goto err;
- if (nla_parse_nested(opts, TIPC_NLA_UDP_MAX,
- attrs[TIPC_NLA_BEARER_UDP_OPTS],
- tipc_nl_udp_policy, NULL))
+ if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attrs[TIPC_NLA_BEARER_UDP_OPTS], tipc_nl_udp_policy, NULL))
goto err;
if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) {
@@ -681,36 +702,64 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto err;
+ if (remote.proto != local.proto) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Checking remote ip address */
+ rmcast = tipc_udp_is_mcast_addr(&remote);
+
+ /* Autoconfigure own node identity if needed */
+ if (!tipc_own_id(net)) {
+ memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16);
+ tipc_net_init(net, node_id, 0);
+ }
+ if (!tipc_own_id(net)) {
+ pr_warn("Failed to set node id, please configure manually\n");
+ err = -EINVAL;
+ goto err;
+ }
+
b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
rcu_assign_pointer(b->media_ptr, ub);
rcu_assign_pointer(ub->bearer, b);
tipc_udp_media_addr_set(&b->addr, &local);
if (local.proto == htons(ETH_P_IP)) {
- struct net_device *dev;
-
dev = __ip_dev_find(net, local.ipv4.s_addr, false);
if (!dev) {
err = -ENODEV;
goto err;
}
udp_conf.family = AF_INET;
- udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ /* Switch to use ANY to receive packets from group */
+ if (rmcast)
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ else
+ udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
- if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
- sizeof(struct udphdr))) {
- err = -EINVAL;
- goto err;
- }
- b->mtu = dev->mtu - sizeof(struct iphdr)
- - sizeof(struct udphdr);
+ b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
+ b->mtu = b->media->mtu;
#if IS_ENABLED(CONFIG_IPV6)
} else if (local.proto == htons(ETH_P_IPV6)) {
+ dev = ub->ifindex ? __dev_get_by_index(net, ub->ifindex) : NULL;
+ dev = ipv6_dev_find(net, &local.ipv6, dev);
+ if (!dev) {
+ err = -ENODEV;
+ goto err;
+ }
udp_conf.family = AF_INET6;
udp_conf.use_udp6_tx_checksums = true;
udp_conf.use_udp6_rx_checksums = true;
- udp_conf.local_ip6 = in6addr_any;
+ if (rmcast)
+ udp_conf.local_ip6 = in6addr_any;
+ else
+ udp_conf.local_ip6 = local.ipv6;
+ ub->ifindex = dev->ifindex;
+ b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
b->mtu = 1280;
#endif
} else {
@@ -727,22 +776,28 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
tuncfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg);
- /**
+ err = dst_cache_init(&ub->rcast.dst_cache, GFP_ATOMIC);
+ if (err)
+ goto free;
+
+ /*
* The bcast media address port is used for all peers and the ip
* is used if it's a multicast address.
*/
memcpy(&b->bcast_addr.value, &remote, sizeof(remote));
- if (tipc_udp_is_mcast_addr(&remote))
+ if (rmcast)
err = enable_mcast(ub, &remote);
else
err = tipc_udp_rcast_add(b, &remote);
if (err)
- goto err;
+ goto free;
return 0;
+
+free:
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
err:
- if (ub->ubsock)
- udp_tunnel_sock_release(ub->ubsock);
kfree(ub);
return err;
}
@@ -752,15 +807,22 @@ static void cleanup_bearer(struct work_struct *work)
{
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
struct udp_replicast *rcast, *tmp;
+ struct tipc_net *tn;
list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
+ dst_cache_destroy(&rcast->dst_cache);
list_del_rcu(&rcast->list);
kfree_rcu(rcast, rcu);
}
- if (ub->ubsock)
- udp_tunnel_sock_release(ub->ubsock);
+ tn = tipc_net(sock_net(ub->ubsock->sk));
+
+ dst_cache_destroy(&ub->rcast.dst_cache);
+ udp_tunnel_sock_release(ub->ubsock);
+
+ /* Note: could use a call_rcu() to avoid another synchronize_net() */
synchronize_net();
+ atomic_dec(&tn->wq_count);
kfree(ub);
}
@@ -769,16 +831,16 @@ static void tipc_udp_disable(struct tipc_bearer *b)
{
struct udp_bearer *ub;
- ub = rcu_dereference_rtnl(b->media_ptr);
+ ub = rtnl_dereference(b->media_ptr);
if (!ub) {
pr_err("UDP bearer instance not found\n");
return;
}
- if (ub->ubsock)
- sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
+ sock_set_flag(ub->ubsock->sk, SOCK_DEAD);
RCU_INIT_POINTER(ub->bearer, NULL);
/* sock_release need to be done outside of rtnl lock */
+ atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
INIT_WORK(&ub->work, cleanup_bearer);
schedule_work(&ub->work);
}
@@ -792,7 +854,9 @@ struct tipc_media udp_media_info = {
.msg2addr = tipc_udp_msg2addr,
.priority = TIPC_DEF_LINK_PRI,
.tolerance = TIPC_DEF_LINK_TOL,
- .window = TIPC_DEF_LINK_WIN,
+ .min_win = TIPC_DEF_LINK_WIN,
+ .max_win = TIPC_DEF_LINK_WIN,
+ .mtu = TIPC_DEF_LINK_UDP_MTU,
.type_id = TIPC_MEDIA_TYPE_UDP,
.hwaddr_len = 0,
.name = "udp"
diff --git a/net/tipc/udp_media.h b/net/tipc/udp_media.h
index 281bbae87726..e7455cc73e16 100644
--- a/net/tipc/udp_media.h
+++ b/net/tipc/udp_media.h
@@ -38,9 +38,23 @@
#ifndef _TIPC_UDP_MEDIA_H
#define _TIPC_UDP_MEDIA_H
+#include <linux/ip.h>
+#include <linux/udp.h>
+
int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr);
int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b);
int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb);
+/* check if configured MTU is too low for tipc headers */
+static inline bool tipc_udp_mtu_bad(u32 mtu)
+{
+ if (mtu >= (TIPC_MIN_BEARER_MTU + sizeof(struct iphdr) +
+ sizeof(struct udphdr)))
+ return false;
+
+ pr_warn("MTU too low for tipc bearer\n");
+ return true;
+}
+
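+/* Usage sketch (illustrative, not part of this patch): an enable path
+ * could reject an underlying device whose MTU cannot carry TIPC plus the
+ * IP/UDP encapsulation overhead:
+ *
+ *	if (tipc_udp_mtu_bad(dev->mtu))
+ *		return -EINVAL;
+ */
+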
#endif
#endif