Diffstat (limited to 'net/sctp/associola.c')
-rw-r--r--  net/sctp/associola.c   343
1 file changed, 169 insertions(+), 174 deletions(-)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 40ec83679d6e..5793d71852b8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
@@ -9,22 +10,6 @@
*
* This module provides the abstraction for an SCTP association.
*
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- * ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING. If not, see
- * <http://www.gnu.org/licenses/>.
- *
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
@@ -63,13 +48,12 @@ static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
/* 1st Level Abstractions. */
/* Initialize a new association from provided memory. */
-static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
- const struct sctp_endpoint *ep,
- const struct sock *sk,
- sctp_scope_t scope,
- gfp_t gfp)
+static struct sctp_association *sctp_association_init(
+ struct sctp_association *asoc,
+ const struct sctp_endpoint *ep,
+ const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp)
{
- struct net *net = sock_net(sk);
struct sctp_sock *sp;
struct sctp_paramhdr *p;
int i;
@@ -80,6 +64,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
asoc->base.sk = (struct sock *)sk;
+ asoc->base.net = sock_net(sk);
sctp_endpoint_hold(asoc->ep);
sock_hold(asoc->base.sk);
@@ -101,7 +86,9 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
- asoc->pf_retrans = net->sctp.pf_retrans;
+ asoc->pf_retrans = sp->pf_retrans;
+ asoc->ps_retrans = sp->ps_retrans;
+ asoc->pf_expose = sp->pf_expose;
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
@@ -111,12 +98,15 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* sock configured value.
*/
asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+ asoc->probe_interval = msecs_to_jiffies(sp->probe_interval);
+
+ asoc->encap_port = sp->encap_port;
/* Initialize path max retrans value. */
asoc->pathmaxrxt = sp->pathmaxrxt;
- /* Initialize default path MTU. */
- asoc->pathmtu = sp->pathmtu;
+ asoc->flowlabel = sp->flowlabel;
+ asoc->dscp = sp->dscp;
/* Set association default SACK delay */
asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
@@ -132,6 +122,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->max_burst = sp->max_burst;
+ asoc->subscribe = sp->subscribe;
+
/* initialize association timers */
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
@@ -145,12 +137,12 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
+ (unsigned long)sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
- setup_timer(&asoc->timers[i], sctp_timer_events[i],
- (unsigned long)asoc);
+ timer_setup(&asoc->timers[i], sctp_timer_events[i], 0);
/* Pull default initialization values from the sock options.
* Note: This assumes that the values have already been
@@ -228,14 +220,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.sack_needed = 1;
asoc->peer.sack_generation = 1;
- /* Assume that the peer will tell us if he recognizes ASCONF
- * as part of INIT exchange.
- * The sctp_addip_noauth option is there for backward compatibility
- * and will revert old behavior.
- */
- if (net->sctp.addip_noauth)
- asoc->peer.asconf_capable = 1;
-
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
@@ -243,12 +227,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
- if (!sctp_ulpq_init(&asoc->ulpq, asoc))
- goto fail_init;
+ sctp_ulpq_init(&asoc->ulpq, asoc);
- if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
- 0, gfp))
- goto fail_init;
+ if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
+ goto stream_free;
+
+ /* Initialize default path MTU. */
+ asoc->pathmtu = sp->pathmtu;
+ sctp_assoc_update_frag_point(asoc);
/* Assume that peer would support both address types unless we are
* told otherwise.
@@ -271,8 +257,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
goto stream_free;
asoc->active_key_id = ep->active_key_id;
- asoc->prsctp_enable = ep->prsctp_enable;
- asoc->reconf_enable = ep->reconf_enable;
asoc->strreset_enable = ep->strreset_enable;
/* Save the hmacs and chunks list into this association */
@@ -293,7 +277,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
stream_free:
sctp_stream_free(&asoc->stream);
-fail_init:
sock_put(asoc->base.sk);
sctp_endpoint_put(asoc->ep);
return NULL;
@@ -301,9 +284,8 @@ fail_init:
/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
- const struct sock *sk,
- sctp_scope_t scope,
- gfp_t gfp)
+ const struct sock *sk,
+ enum sctp_scope scope, gfp_t gfp)
{
struct sctp_association *asoc;
@@ -346,7 +328,7 @@ void sctp_association_free(struct sctp_association *asoc)
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
- sk->sk_ack_backlog--;
+ sk_acceptq_removed(sk);
}
/* Mark as dead, so other users can know this structure is
@@ -380,7 +362,7 @@ void sctp_association_free(struct sctp_association *asoc)
* on our state.
*/
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
- if (del_timer(&asoc->timers[i]))
+ if (timer_delete(&asoc->timers[i]))
sctp_association_put(asoc);
}
@@ -433,7 +415,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
WARN_ON(atomic_read(&asoc->rmem_alloc));
- kfree(asoc);
+ kfree_rcu(asoc, rcu);
SCTP_DBG_OBJCNT_DEC(assoc);
}
@@ -451,6 +433,8 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
changeover = 1 ;
asoc->peer.primary_path = transport;
+ sctp_ulpevent_notify_peer_addr_change(transport,
+ SCTP_ADDR_MADE_PRIM, 0);
/* Set a default msg_name for events. */
memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
@@ -498,8 +482,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer)
{
- struct list_head *pos;
- struct sctp_transport *transport;
+ struct sctp_transport *transport;
+ struct list_head *pos;
+ struct sctp_chunk *ch;
pr_debug("%s: association:%p addr:%pISpc\n",
__func__, asoc, &peer->ipaddr.sa);
@@ -563,7 +548,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
*/
if (!list_empty(&peer->transmitted)) {
struct sctp_transport *active = asoc->peer.active_path;
- struct sctp_chunk *ch;
/* Reset the transport of each chunk on this list */
list_for_each_entry(ch, &peer->transmitted,
@@ -585,8 +569,13 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_transport_hold(active);
}
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
+ if (ch->transport == peer)
+ ch->transport = NULL;
+
asoc->peer.transport_count--;
+ sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_REMOVED, 0);
sctp_transport_free(peer);
}
@@ -596,7 +585,6 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
const gfp_t gfp,
const int peer_state)
{
- struct net *net = sock_net(asoc->base.sk);
struct sctp_transport *peer;
struct sctp_sock *sp;
unsigned short port;
@@ -626,7 +614,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
return peer;
}
- peer = sctp_transport_new(net, addr, gfp);
+ peer = sctp_transport_new(asoc->base.net, addr, gfp);
if (!peer)
return NULL;
@@ -636,12 +624,17 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
* association configured value.
*/
peer->hbinterval = asoc->hbinterval;
+ peer->probe_interval = asoc->probe_interval;
+
+ peer->encap_port = asoc->encap_port;
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
/* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
+ /* And the primary path switchover retrans threshold */
+ peer->ps_retrans = asoc->ps_retrans;
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
@@ -649,38 +642,37 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
peer->sackdelay = asoc->sackdelay;
peer->sackfreq = asoc->sackfreq;
+ if (addr->sa.sa_family == AF_INET6) {
+ __be32 info = addr->v6.sin6_flowinfo;
+
+ if (info) {
+ peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
+ peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+ } else {
+ peer->flowlabel = asoc->flowlabel;
+ }
+ }
+ peer->dscp = asoc->dscp;
+
/* Enable/disable heartbeat, SACK delay, and path MTU discovery
* based on association setting.
*/
peer->param_flags = asoc->param_flags;
- sctp_transport_route(peer, NULL, sp);
-
/* Initialize the pmtu of the transport. */
- if (peer->param_flags & SPP_PMTUD_DISABLE) {
- if (asoc->pathmtu)
- peer->pathmtu = asoc->pathmtu;
- else
- peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
- }
+ sctp_transport_route(peer, NULL, sp);
/* If this is the first transport addr on this association,
* initialize the association PMTU to the peer's PMTU.
* If not and the current association PMTU is higher than the new
* peer's PMTU, reset the association PMTU to the new peer's PMTU.
*/
- if (asoc->pathmtu)
- asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
- else
- asoc->pathmtu = peer->pathmtu;
-
- pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
- asoc->pathmtu);
+ sctp_assoc_set_pmtu(asoc, asoc->pathmtu ?
+ min_t(int, peer->pathmtu, asoc->pathmtu) :
+ peer->pathmtu);
peer->pmtu_pending = 0;
- asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
-
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
*/
@@ -722,10 +714,14 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
return NULL;
}
+ sctp_transport_pl_reset(peer);
+
/* Attach the remote transport to our asoc. */
list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
asoc->peer.transport_count++;
+ sctp_ulpevent_notify_peer_addr_change(peer, SCTP_ADDR_ADDED, 0);
+
/* If we do not yet have a primary path, set one. */
if (!asoc->peer.primary_path) {
sctp_assoc_set_primary(asoc, peer);
@@ -740,24 +736,6 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
return peer;
}
-/* Delete a transport address from an association. */
-void sctp_assoc_del_peer(struct sctp_association *asoc,
- const union sctp_addr *addr)
-{
- struct list_head *pos;
- struct list_head *temp;
- struct sctp_transport *transport;
-
- list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
- transport = list_entry(pos, struct sctp_transport, transports);
- if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
- /* Do book keeping for removing the peer and free it. */
- sctp_assoc_rm_peer(asoc, transport);
- break;
- }
- }
-}
-
/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
const struct sctp_association *asoc,
@@ -797,12 +775,10 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
*/
void sctp_assoc_control_transport(struct sctp_association *asoc,
struct sctp_transport *transport,
- sctp_transport_cmd_t command,
+ enum sctp_transport_cmd command,
sctp_sn_error_t error)
{
- struct sctp_ulpevent *event;
- struct sockaddr_storage addr;
- int spc_state = 0;
+ int spc_state = SCTP_ADDR_AVAILABLE;
bool ulp_notify = true;
/* Record the transition on the transport. */
@@ -812,20 +788,15 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to heartbeat success, report the SCTP_ADDR_CONFIRMED
* state to the user, otherwise report SCTP_ADDR_AVAILABLE.
*/
- if (SCTP_UNCONFIRMED == transport->state &&
- SCTP_HEARTBEAT_SUCCESS == error)
- spc_state = SCTP_ADDR_CONFIRMED;
- else
- spc_state = SCTP_ADDR_AVAILABLE;
- /* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1 MTU, see SCTP
- * Quick failover draft section 5.1, point 5
- */
- if (transport->state == SCTP_PF) {
+ if (transport->state == SCTP_PF &&
+ asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
ulp_notify = false;
- transport->cwnd = asoc->pathmtu;
- }
+ else if (transport->state == SCTP_UNCONFIRMED &&
+ error == SCTP_HEARTBEAT_SUCCESS)
+ spc_state = SCTP_ADDR_CONFIRMED;
+
transport->state = SCTP_ACTIVE;
+ sctp_transport_pl_reset(transport);
break;
case SCTP_TRANSPORT_DOWN:
@@ -833,19 +804,22 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
* to inactive state. Also, release the cached route since
* there may be a better route next time.
*/
- if (transport->state != SCTP_UNCONFIRMED)
+ if (transport->state != SCTP_UNCONFIRMED) {
transport->state = SCTP_INACTIVE;
- else {
+ sctp_transport_pl_reset(transport);
+ spc_state = SCTP_ADDR_UNREACHABLE;
+ } else {
sctp_transport_dst_release(transport);
ulp_notify = false;
}
-
- spc_state = SCTP_ADDR_UNREACHABLE;
break;
case SCTP_TRANSPORT_PF:
transport->state = SCTP_PF;
- ulp_notify = false;
+ if (asoc->pf_expose != SCTP_PF_EXPOSE_ENABLE)
+ ulp_notify = false;
+ else
+ spc_state = SCTP_ADDR_POTENTIALLY_FAILED;
break;
default:
@@ -855,16 +829,9 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
* to the user.
*/
- if (ulp_notify) {
- memset(&addr, 0, sizeof(struct sockaddr_storage));
- memcpy(&addr, &transport->ipaddr,
- transport->af_specific->sockaddr_len);
-
- event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
- 0, spc_state, error, GFP_ATOMIC);
- if (event)
- sctp_ulpq_tail_event(&asoc->ulpq, event);
- }
+ if (ulp_notify)
+ sctp_ulpevent_notify_peer_addr_change(transport,
+ spc_state, error);
/* Select new active and retran paths. */
sctp_select_active_and_retran_path(asoc);
@@ -990,44 +957,20 @@ out:
return match;
}
-/* Is this the association we are looking for? */
-struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
- struct net *net,
- const union sctp_addr *laddr,
- const union sctp_addr *paddr)
-{
- struct sctp_transport *transport;
-
- if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
- (htons(asoc->peer.port) == paddr->v4.sin_port) &&
- net_eq(sock_net(asoc->base.sk), net)) {
- transport = sctp_assoc_lookup_paddr(asoc, paddr);
- if (!transport)
- goto out;
-
- if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
- sctp_sk(asoc->base.sk)))
- goto out;
- }
- transport = NULL;
-
-out:
- return transport;
-}
-
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
struct sctp_association *asoc =
container_of(work, struct sctp_association,
base.inqueue.immediate);
- struct net *net = sock_net(asoc->base.sk);
+ struct net *net = asoc->base.net;
+ union sctp_subtype subtype;
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
- int state;
- sctp_subtype_t subtype;
+ int first_time = 1; /* is this the first time through the loop */
int error = 0;
+ int state;
/* The association should be held so we should be safe. */
ep = asoc->ep;
@@ -1038,6 +981,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
@@ -1076,6 +1043,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
}
sctp_association_put(asoc);
}
@@ -1093,7 +1063,7 @@ void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
- oldsk->sk_ack_backlog--;
+ sk_acceptq_removed(oldsk);
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
@@ -1172,8 +1142,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
- if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
- !sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;
@@ -1371,7 +1340,7 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
}
/* We did not find anything useful for a possible retransmission
- * path; either primary path that we found is the the same as
+ * path; either primary path that we found is the same as
* the current one, or we didn't generally find an active one.
*/
if (trans_sec == NULL)
@@ -1408,6 +1377,31 @@ sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
}
}
+void sctp_assoc_update_frag_point(struct sctp_association *asoc)
+{
+ int frag = sctp_mtu_payload(sctp_sk(asoc->base.sk), asoc->pathmtu,
+ sctp_datachk_len(&asoc->stream));
+
+ if (asoc->user_frag)
+ frag = min_t(int, frag, asoc->user_frag);
+
+ frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN -
+ sctp_datachk_len(&asoc->stream));
+
+ asoc->frag_point = SCTP_TRUNC4(frag);
+}
+
+void sctp_assoc_set_pmtu(struct sctp_association *asoc, __u32 pmtu)
+{
+ if (asoc->pathmtu != pmtu) {
+ asoc->pathmtu = pmtu;
+ sctp_assoc_update_frag_point(asoc);
+ }
+
+ pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
+ asoc->pathmtu, asoc->frag_point);
+}
+
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
@@ -1420,30 +1414,24 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
return;
/* Get the lowest pmtu of all the transports. */
- list_for_each_entry(t, &asoc->peer.transport_addr_list,
- transports) {
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(
- t, SCTP_TRUNC4(dst_mtu(t->dst)));
+ sctp_transport_update_pmtu(t,
+ atomic_read(&t->mtu_info));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
pmtu = t->pathmtu;
}
- if (pmtu) {
- asoc->pathmtu = pmtu;
- asoc->frag_point = sctp_frag_point(asoc, pmtu);
- }
-
- pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
- asoc->pathmtu, asoc->frag_point);
+ sctp_assoc_set_pmtu(asoc, pmtu);
}
/* Should we send a SACK to update our peer? */
static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
{
- struct net *net = sock_net(asoc->base.sk);
+ struct net *net = asoc->base.net;
+
switch (asoc->state) {
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
@@ -1515,7 +1503,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
/* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
- if (del_timer(timer))
+ if (timer_delete(timer))
sctp_association_put(asoc);
}
}
@@ -1538,7 +1526,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
/* If we've reached or overflowed our receive buffer, announce
* a 0 rwnd if rwnd would still be positive. Store the
- * the potential pressure overflow so that the window can be restored
+ * potential pressure overflow so that the window can be restored
* back to original value.
*/
if (rx_count >= asoc->base.sk->sk_rcvbuf)
@@ -1564,20 +1552,23 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
* local endpoint and the remote peer.
*/
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
- sctp_scope_t scope, gfp_t gfp)
+ enum sctp_scope scope, gfp_t gfp)
{
+ struct sock *sk = asoc->base.sk;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
flags |= SCTP_ADDR6_PEERSUPP;
- return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+ return sctp_bind_addr_copy(asoc->base.net,
&asoc->base.bind_addr,
&asoc->ep->base.bind_addr,
scope, gfp, flags);
@@ -1588,9 +1579,10 @@ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
struct sctp_cookie *cookie,
gfp_t gfp)
{
- int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
+ struct sctp_init_chunk *peer_init = (struct sctp_init_chunk *)(cookie + 1);
+ int var_size2 = ntohs(peer_init->chunk_hdr.length);
int var_size3 = cookie->raw_addr_list_len;
- __u8 *raw = (__u8 *)cookie->peer_init + var_size2;
+ __u8 *raw = (__u8 *)peer_init + var_size2;
return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
asoc->ep->base.bind_addr.port, gfp);
@@ -1623,8 +1615,11 @@ int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
if (preload)
idr_preload(gfp);
spin_lock_bh(&sctp_assocs_id_lock);
- /* 0 is not a valid assoc_id, must be >= 1 */
- ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
+ /* 0, 1, 2 are used as SCTP_FUTURE_ASSOC, SCTP_CURRENT_ASSOC and
+ * SCTP_ALL_ASSOC, so an available id must be > SCTP_ALL_ASSOC.
+ */
+ ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, SCTP_ALL_ASSOC + 1, 0,
+ GFP_NOWAIT);
spin_unlock_bh(&sctp_assocs_id_lock);
if (preload)
idr_preload_end();
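
Below the patch, a minimal sketch (not part of this commit; the struct and function names are made up) of the timer API conversion applied in the sctp_association_init() hunk above: timer_setup() hands the callback a struct timer_list pointer, and from_timer() recovers the enclosing object, replacing the (unsigned long) cast that setup_timer() used to require.

/* Illustrative only -- "my_assoc" and its fields are hypothetical. */
#include <linux/timer.h>

struct my_assoc {
	struct timer_list t1_timer;
	int state;
};

static void my_t1_expired(struct timer_list *t)
{
	/* from_timer() is container_of() on the timer_list member. */
	struct my_assoc *asoc = from_timer(asoc, t, t1_timer);

	asoc->state = 0;	/* stand-in for the real expiry work */
}

static void my_assoc_setup(struct my_assoc *asoc)
{
	/* Old API: setup_timer(&asoc->t1_timer, cb, (unsigned long)asoc); */
	timer_setup(&asoc->t1_timer, my_t1_expired, 0);
}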