Diffstat (limited to 'net/ipv6/ip6_flowlabel.c')
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 584
1 file changed, 320 insertions(+), 264 deletions(-)
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 46e88433ec7d..60d0be47a9f3 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ip6_flowlabel.c IPv6 flowlabel manager.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*/
@@ -15,33 +11,27 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
-#include <linux/if_arp.h>
#include <linux/in6.h>
-#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>
+#include <linux/jump_label_ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/protocol.h>
-#include <net/ip6_route.h>
-#include <net/addrconf.h>
#include <net/rawv6.h>
-#include <net/icmp.h>
#include <net/transp_v6.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#define FL_MIN_LINGER 6 /* Minimal linger. It is set to 6sec specified
in old IPv6 RFC. Well, it was reasonable value.
*/
-#define FL_MAX_LINGER 60 /* Maximal linger timeout */
+#define FL_MAX_LINGER 150 /* Maximal linger timeout */
/* FL hash table */
@@ -53,8 +43,8 @@
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
-static void ip6_fl_gc(unsigned long dummy);
-static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc, 0, 0);
+static void ip6_fl_gc(struct timer_list *unused);
+static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
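Note: the two lines above convert the garbage-collection timer from the old unsigned-long "data cookie" callbacks to the struct timer_list * form; ip6_fl_gc() needs no per-timer data, hence the unused argument and the two-argument DEFINE_TIMER(). For the common case where a callback does need its containing object, the general pattern looks roughly like this (demo_ctx and demo_timer_fn are made-up names, not from this patch):

struct demo_ctx {
	struct timer_list timer;
	int hits;
};

static void demo_timer_fn(struct timer_list *t)
{
	/* Recover the containing object from the timer_list pointer. */
	struct demo_ctx *ctx = from_timer(ctx, t, timer);

	ctx->hits++;
}

/* arm it with: timer_setup(&ctx->timer, demo_timer_fn, 0); mod_timer(&ctx->timer, jiffies + HZ); */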
/* FL hash table lock: it protects only of GC */
@@ -64,19 +54,22 @@ static DEFINE_SPINLOCK(ip6_fl_lock);
static DEFINE_SPINLOCK(ip6_sk_fl_lock);
+DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ);
+EXPORT_SYMBOL(ipv6_flowlabel_exclusive);
+
#define for_each_fl_rcu(hash, fl) \
- for (fl = rcu_dereference_bh(fl_ht[(hash)]); \
+ for (fl = rcu_dereference(fl_ht[(hash)]); \
fl != NULL; \
- fl = rcu_dereference_bh(fl->next))
+ fl = rcu_dereference(fl->next))
#define for_each_fl_continue_rcu(fl) \
- for (fl = rcu_dereference_bh(fl->next); \
+ for (fl = rcu_dereference(fl->next); \
fl != NULL; \
- fl = rcu_dereference_bh(fl->next))
+ fl = rcu_dereference(fl->next))
-#define for_each_sk_fl_rcu(np, sfl) \
- for (sfl = rcu_dereference_bh(np->ipv6_fl_list); \
+#define for_each_sk_fl_rcu(sk, sfl) \
+ for (sfl = rcu_dereference(inet_sk(sk)->ipv6_fl_list); \
sfl != NULL; \
- sfl = rcu_dereference_bh(sfl->next))
+ sfl = rcu_dereference(sfl->next))
static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
@@ -93,24 +86,41 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
struct ip6_flowlabel *fl;
- rcu_read_lock_bh();
+ rcu_read_lock();
fl = __fl_lookup(net, label);
if (fl && !atomic_inc_not_zero(&fl->users))
fl = NULL;
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return fl;
}
+static bool fl_shared_exclusive(struct ip6_flowlabel *fl)
+{
+ return fl->share == IPV6_FL_S_EXCL ||
+ fl->share == IPV6_FL_S_PROCESS ||
+ fl->share == IPV6_FL_S_USER;
+}
+
+static void fl_free_rcu(struct rcu_head *head)
+{
+ struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
+
+ if (fl->share == IPV6_FL_S_PROCESS)
+ put_pid(fl->owner.pid);
+ kfree(fl->opt);
+ kfree(fl);
+}
+
static void fl_free(struct ip6_flowlabel *fl)
{
- if (fl) {
- if (fl->share == IPV6_FL_S_PROCESS)
- put_pid(fl->owner.pid);
- release_net(fl->fl_net);
- kfree(fl->opt);
- kfree_rcu(fl, rcu);
- }
+ if (!fl)
+ return;
+
+ if (fl_shared_exclusive(fl) || fl->opt)
+ static_branch_slow_dec_deferred(&ipv6_flowlabel_exclusive);
+
+ call_rcu(&fl->rcu, fl_free_rcu);
}
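Note: fl_free() now pairs a static_branch_slow_dec_deferred() with the static_branch_deferred_inc() done in fl_create() further down, so the flow-label lookup fast path stays patched out unless exclusive or option-carrying labels actually exist. A compact sketch of that deferred static-key lifecycle (linux/jump_label_ratelimit.h), with demo_key and do_slow_path() as illustrative names:

DEFINE_STATIC_KEY_DEFERRED_FALSE(demo_key, HZ);

	static_branch_deferred_inc(&demo_key);		/* first user: enable the branch */
	if (static_branch_unlikely(&demo_key.key))	/* fast path: a NOP while the key is false */
		do_slow_path();
	static_branch_slow_dec_deferred(&demo_key);	/* last user: disable, rate-limited to HZ */
	static_key_deferred_flush(&demo_key);		/* teardown: flush any pending disable */

The HZ rate limit keeps rapid label create/free cycles from repeatedly patching kernel text; the flush in ip6_flowlabel_cleanup() at the end of this diff cancels any decrement still queued.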
static void fl_release(struct ip6_flowlabel *fl)
@@ -135,7 +145,7 @@ static void fl_release(struct ip6_flowlabel *fl)
spin_unlock_bh(&ip6_fl_lock);
}
-static void ip6_fl_gc(unsigned long dummy)
+static void ip6_fl_gc(struct timer_list *unused)
{
int i;
unsigned long now = jiffies;
@@ -143,7 +153,7 @@ static void ip6_fl_gc(unsigned long dummy)
spin_lock(&ip6_fl_lock);
- for (i=0; i<=FL_HASH_MASK; i++) {
+ for (i = 0; i <= FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl;
struct ip6_flowlabel __rcu **flp;
@@ -179,7 +189,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
{
int i;
- spin_lock(&ip6_fl_lock);
+ spin_lock_bh(&ip6_fl_lock);
for (i = 0; i <= FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl;
struct ip6_flowlabel __rcu **flp;
@@ -197,7 +207,7 @@ static void __net_exit ip6_fl_purge(struct net *net)
flp = &fl->next;
}
}
- spin_unlock(&ip6_fl_lock);
+ spin_unlock_bh(&ip6_fl_lock);
}
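Note: this hunk and the one above it switch ip6_fl_purge() to the _bh lock variants because the same lock is taken from softirq context by the GC timer. The constraint in miniature:

	/* Process context (e.g. netns teardown): must disable bottom halves,
	 * otherwise ip6_fl_gc() firing on this CPU while the lock is held
	 * would spin on it forever. */
	spin_lock_bh(&ip6_fl_lock);
	/* ... unlink this netns's entries ... */
	spin_unlock_bh(&ip6_fl_lock);

	/* The softirq side (the timer itself) can keep using plain spin_lock(). */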
static struct ip6_flowlabel *fl_intern(struct net *net,
@@ -207,13 +217,14 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
fl->label = label & IPV6_FLOWLABEL_MASK;
+ rcu_read_lock();
spin_lock_bh(&ip6_fl_lock);
if (label == 0) {
for (;;) {
- fl->label = htonl(net_random())&IPV6_FLOWLABEL_MASK;
+ fl->label = htonl(get_random_u32())&IPV6_FLOWLABEL_MASK;
if (fl->label) {
lfl = __fl_lookup(net, fl->label);
- if (lfl == NULL)
+ if (!lfl)
break;
}
}
@@ -227,9 +238,10 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
* with the same label can only appear on another sock
*/
lfl = __fl_lookup(net, fl->label);
- if (lfl != NULL) {
+ if (lfl) {
atomic_inc(&lfl->users);
spin_unlock_bh(&ip6_fl_lock);
+ rcu_read_unlock();
return lfl;
}
}
@@ -239,6 +251,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
atomic_inc(&fl_size);
spin_unlock_bh(&ip6_fl_lock);
+ rcu_read_unlock();
return NULL;
}
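Note: fl_intern() publishes the label under ip6_fl_lock with rcu_assign_pointer(), while readers such as fl_lookup() above walk the chain under rcu_read_lock() and take a reference with atomic_inc_not_zero(); that is what makes the call_rcu()-deferred free in fl_free_rcu() safe. The idiom in miniature, with illustrative names:

	/* writer, spinlock held */
	new_fl->next = head;
	rcu_assign_pointer(head, new_fl);

	/* reader */
	rcu_read_lock();
	fl = rcu_dereference(head);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;		/* lost the race against the final put */
	rcu_read_unlock();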
@@ -246,41 +259,39 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
/* Socket flowlabel lists */
-struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
+struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label)
{
struct ipv6_fl_socklist *sfl;
- struct ipv6_pinfo *np = inet6_sk(sk);
label &= IPV6_FLOWLABEL_MASK;
- rcu_read_lock_bh();
- for_each_sk_fl_rcu(np, sfl) {
+ rcu_read_lock();
+ for_each_sk_fl_rcu(sk, sfl) {
struct ip6_flowlabel *fl = sfl->fl;
- if (fl->label == label) {
+
+ if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
fl->lastuse = jiffies;
- atomic_inc(&fl->users);
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return fl;
}
}
- rcu_read_unlock_bh();
+ rcu_read_unlock();
return NULL;
}
-
-EXPORT_SYMBOL_GPL(fl6_sock_lookup);
+EXPORT_SYMBOL_GPL(__fl6_sock_lookup);
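Note: with the rename to __fl6_sock_lookup(), callers of fl6_sock_lookup() go through an inline wrapper in include/net/ipv6.h that bails out early unless the static key above is enabled. Reproduced from memory, so treat it as an approximation rather than the exact upstream code:

static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
	    READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
		return __fl6_sock_lookup(sk, label);
	return NULL;
}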
void fl6_free_socklist(struct sock *sk)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
struct ipv6_fl_socklist *sfl;
- if (!rcu_access_pointer(np->ipv6_fl_list))
+ if (!rcu_access_pointer(inet->ipv6_fl_list))
return;
spin_lock_bh(&ip6_sk_fl_lock);
- while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
+ while ((sfl = rcu_dereference_protected(inet->ipv6_fl_list,
lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
- np->ipv6_fl_list = sfl->next;
+ inet->ipv6_fl_list = sfl->next;
spin_unlock_bh(&ip6_sk_fl_lock);
fl_release(sfl->fl);
@@ -300,16 +311,16 @@ void fl6_free_socklist(struct sock *sk)
following rthdr.
*/
-struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
- struct ip6_flowlabel * fl,
- struct ipv6_txoptions * fopt)
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+ struct ipv6_txoptions *fopt)
{
- struct ipv6_txoptions * fl_opt = fl->opt;
+ struct ipv6_txoptions *fl_opt = fl->opt;
- if (fopt == NULL || fopt->opt_flen == 0)
+ if (!fopt || fopt->opt_flen == 0)
return fl_opt;
- if (fl_opt != NULL) {
+ if (fl_opt) {
opt_space->hopopt = fl_opt->hopopt;
opt_space->dst0opt = fl_opt->dst0opt;
opt_space->srcrt = fl_opt->srcrt;
@@ -324,6 +335,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
}
opt_space->dst1opt = fopt->dst1opt;
opt_space->opt_flen = fopt->opt_flen;
+ opt_space->tot_len = fopt->tot_len;
return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
@@ -345,6 +357,8 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
expires = check_linger(expires);
if (!expires)
return -EPERM;
+
+ spin_lock_bh(&ip6_fl_lock);
fl->lastuse = jiffies;
if (time_before(fl->linger, linger))
fl->linger = linger;
@@ -352,12 +366,14 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
expires = fl->linger;
if (time_before(fl->expires, fl->lastuse + expires))
fl->expires = fl->lastuse + expires;
+ spin_unlock_bh(&ip6_fl_lock);
+
return 0;
}
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
- char __user *optval, int optlen, int *err_p)
+ sockptr_t optval, int optlen, int *err_p)
{
struct ip6_flowlabel *fl = NULL;
int olen;
@@ -371,31 +387,32 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -ENOMEM;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
- if (fl == NULL)
+ if (!fl)
goto done;
if (olen > 0) {
struct msghdr msg;
struct flowi6 flowi6;
- int junk;
+ struct ipcm6_cookie ipc6;
err = -ENOMEM;
fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
- if (fl->opt == NULL)
+ if (!fl->opt)
goto done;
memset(fl->opt, 0, sizeof(*fl->opt));
fl->opt->tot_len = sizeof(*fl->opt) + olen;
err = -EFAULT;
- if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
+ if (copy_from_sockptr_offset(fl->opt + 1, optval,
+ CMSG_ALIGN(sizeof(*freq)), olen))
goto done;
msg.msg_controllen = olen;
- msg.msg_control = (void*)(fl->opt+1);
+ msg.msg_control = (void *)(fl->opt+1);
memset(&flowi6, 0, sizeof(flowi6));
- err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
- &junk, &junk, &junk);
+ ipc6.opt = fl->opt;
+ err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
if (err)
goto done;
err = -EINVAL;
@@ -407,7 +424,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
}
}
- fl->fl_net = hold_net(net);
+ fl->fl_net = net;
fl->expires = jiffies;
err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
if (err)
@@ -435,26 +452,34 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
err = -EINVAL;
goto done;
}
+ if (fl_shared_exclusive(fl) || fl->opt) {
+ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
+ static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
+ }
return fl;
done:
- fl_free(fl);
+ if (fl) {
+ kfree(fl->opt);
+ kfree(fl);
+ }
*err_p = err;
return NULL;
}
static int mem_check(struct sock *sk)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct ipv6_fl_socklist *sfl;
int room = FL_MAX_SIZE - atomic_read(&fl_size);
+ struct ipv6_fl_socklist *sfl;
int count = 0;
if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
return 0;
- for_each_sk_fl_rcu(np, sfl)
+ rcu_read_lock();
+ for_each_sk_fl_rcu(sk, sfl)
count++;
+ rcu_read_unlock();
if (room <= 0 ||
((count >= FL_MAX_PER_SOCK ||
@@ -465,197 +490,259 @@ static int mem_check(struct sock *sk)
return 0;
}
-static bool ipv6_hdr_cmp(struct ipv6_opt_hdr *h1, struct ipv6_opt_hdr *h2)
+static inline void fl_link(struct sock *sk, struct ipv6_fl_socklist *sfl,
+ struct ip6_flowlabel *fl)
{
- if (h1 == h2)
- return false;
- if (h1 == NULL || h2 == NULL)
- return true;
- if (h1->hdrlen != h2->hdrlen)
- return true;
- return memcmp(h1+1, h2+1, ((h1->hdrlen+1)<<3) - sizeof(*h1));
+ struct inet_sock *inet = inet_sk(sk);
+
+ spin_lock_bh(&ip6_sk_fl_lock);
+ sfl->fl = fl;
+ sfl->next = inet->ipv6_fl_list;
+ rcu_assign_pointer(inet->ipv6_fl_list, sfl);
+ spin_unlock_bh(&ip6_sk_fl_lock);
}
-static bool ipv6_opt_cmp(struct ipv6_txoptions *o1, struct ipv6_txoptions *o2)
+int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
+ int flags)
{
- if (o1 == o2)
- return false;
- if (o1 == NULL || o2 == NULL)
- return true;
- if (o1->opt_nflen != o2->opt_nflen)
- return true;
- if (ipv6_hdr_cmp(o1->hopopt, o2->hopopt))
- return true;
- if (ipv6_hdr_cmp(o1->dst0opt, o2->dst0opt))
- return true;
- if (ipv6_hdr_cmp((struct ipv6_opt_hdr *)o1->srcrt, (struct ipv6_opt_hdr *)o2->srcrt))
- return true;
- return false;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_fl_socklist *sfl;
+
+ if (flags & IPV6_FL_F_REMOTE) {
+ freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
+ return 0;
+ }
+
+ if (inet6_test_bit(REPFLOW, sk)) {
+ freq->flr_label = np->flow_label;
+ return 0;
+ }
+
+ rcu_read_lock();
+
+ for_each_sk_fl_rcu(sk, sfl) {
+ if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
+ spin_lock_bh(&ip6_fl_lock);
+ freq->flr_label = sfl->fl->label;
+ freq->flr_dst = sfl->fl->dst;
+ freq->flr_share = sfl->fl->share;
+ freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
+ freq->flr_linger = sfl->fl->linger / HZ;
+
+ spin_unlock_bh(&ip6_fl_lock);
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+
+ return -ENOENT;
}
-static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
- struct ip6_flowlabel *fl)
+#define socklist_dereference(__sflp) \
+ rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock))
+
+static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq)
{
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct ipv6_fl_socklist __rcu **sflp;
+ struct ipv6_fl_socklist *sfl;
+
+ if (freq->flr_flags & IPV6_FL_F_REFLECT) {
+ if (sk->sk_protocol != IPPROTO_TCP)
+ return -ENOPROTOOPT;
+ if (!inet6_test_bit(REPFLOW, sk))
+ return -ESRCH;
+ np->flow_label = 0;
+ inet6_clear_bit(REPFLOW, sk);
+ return 0;
+ }
+
spin_lock_bh(&ip6_sk_fl_lock);
- sfl->fl = fl;
- sfl->next = np->ipv6_fl_list;
- rcu_assign_pointer(np->ipv6_fl_list, sfl);
+ for (sflp = &inet_sk(sk)->ipv6_fl_list;
+ (sfl = socklist_dereference(*sflp)) != NULL;
+ sflp = &sfl->next) {
+ if (sfl->fl->label == freq->flr_label)
+ goto found;
+ }
spin_unlock_bh(&ip6_sk_fl_lock);
+ return -ESRCH;
+found:
+ if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
+ np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+ *sflp = sfl->next;
+ spin_unlock_bh(&ip6_sk_fl_lock);
+ fl_release(sfl->fl);
+ kfree_rcu(sfl, rcu);
+ return 0;
}
-int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq)
{
- int uninitialized_var(err);
struct net *net = sock_net(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_flowlabel_req freq;
- struct ipv6_fl_socklist *sfl1=NULL;
struct ipv6_fl_socklist *sfl;
- struct ipv6_fl_socklist __rcu **sflp;
- struct ip6_flowlabel *fl, *fl1 = NULL;
-
+ int err;
- if (optlen < sizeof(freq))
- return -EINVAL;
+ rcu_read_lock();
+ for_each_sk_fl_rcu(sk, sfl) {
+ if (sfl->fl->label == freq->flr_label) {
+ err = fl6_renew(sfl->fl, freq->flr_linger,
+ freq->flr_expires);
+ rcu_read_unlock();
+ return err;
+ }
+ }
+ rcu_read_unlock();
- if (copy_from_user(&freq, optval, sizeof(freq)))
- return -EFAULT;
+ if (freq->flr_share == IPV6_FL_S_NONE &&
+ ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+ struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);
- switch (freq.flr_action) {
- case IPV6_FL_A_PUT:
- spin_lock_bh(&ip6_sk_fl_lock);
- for (sflp = &np->ipv6_fl_list;
- (sfl = rcu_dereference(*sflp))!=NULL;
- sflp = &sfl->next) {
- if (sfl->fl->label == freq.flr_label) {
- if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
- np->flow_label &= ~IPV6_FLOWLABEL_MASK;
- *sflp = rcu_dereference(sfl->next);
- spin_unlock_bh(&ip6_sk_fl_lock);
- fl_release(sfl->fl);
- kfree_rcu(sfl, rcu);
- return 0;
- }
+ if (fl) {
+ err = fl6_renew(fl, freq->flr_linger,
+ freq->flr_expires);
+ fl_release(fl);
+ return err;
}
- spin_unlock_bh(&ip6_sk_fl_lock);
- return -ESRCH;
+ }
+ return -ESRCH;
+}
- case IPV6_FL_A_RENEW:
- rcu_read_lock_bh();
- for_each_sk_fl_rcu(np, sfl) {
- if (sfl->fl->label == freq.flr_label) {
- err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
- rcu_read_unlock_bh();
- return err;
- }
- }
- rcu_read_unlock_bh();
-
- if (freq.flr_share == IPV6_FL_S_NONE &&
- ns_capable(net->user_ns, CAP_NET_ADMIN)) {
- fl = fl_lookup(net, freq.flr_label);
- if (fl) {
- err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
- fl_release(fl);
- return err;
- }
+static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
+ sockptr_t optval, int optlen)
+{
+ struct ipv6_fl_socklist *sfl, *sfl1 = NULL;
+ struct ip6_flowlabel *fl, *fl1 = NULL;
+ struct net *net = sock_net(sk);
+ int err;
+
+ if (freq->flr_flags & IPV6_FL_F_REFLECT) {
+ if (net->ipv6.sysctl.flowlabel_consistency) {
+ net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
+ return -EPERM;
}
- return -ESRCH;
- case IPV6_FL_A_GET:
- if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
- return -EINVAL;
+ if (sk->sk_protocol != IPPROTO_TCP)
+ return -ENOPROTOOPT;
+ inet6_set_bit(REPFLOW, sk);
+ return 0;
+ }
- fl = fl_create(net, sk, &freq, optval, optlen, &err);
- if (fl == NULL)
- return err;
- sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+ if (freq->flr_label & ~IPV6_FLOWLABEL_MASK)
+ return -EINVAL;
+ if (net->ipv6.sysctl.flowlabel_state_ranges &&
+ (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
+ return -ERANGE;
- if (freq.flr_label) {
- err = -EEXIST;
- rcu_read_lock_bh();
- for_each_sk_fl_rcu(np, sfl) {
- if (sfl->fl->label == freq.flr_label) {
- if (freq.flr_flags&IPV6_FL_F_EXCL) {
- rcu_read_unlock_bh();
- goto done;
- }
- fl1 = sfl->fl;
- atomic_inc(&fl1->users);
- break;
+ fl = fl_create(net, sk, freq, optval, optlen, &err);
+ if (!fl)
+ return err;
+
+ sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+
+ if (freq->flr_label) {
+ err = -EEXIST;
+ rcu_read_lock();
+ for_each_sk_fl_rcu(sk, sfl) {
+ if (sfl->fl->label == freq->flr_label) {
+ if (freq->flr_flags & IPV6_FL_F_EXCL) {
+ rcu_read_unlock();
+ goto done;
}
+ fl1 = sfl->fl;
+ if (!atomic_inc_not_zero(&fl1->users))
+ fl1 = NULL;
+ break;
}
- rcu_read_unlock_bh();
+ }
+ rcu_read_unlock();
- if (fl1 == NULL)
- fl1 = fl_lookup(net, freq.flr_label);
- if (fl1) {
+ if (!fl1)
+ fl1 = fl_lookup(net, freq->flr_label);
+ if (fl1) {
recheck:
- err = -EEXIST;
- if (freq.flr_flags&IPV6_FL_F_EXCL)
- goto release;
- err = -EPERM;
- if (fl1->share == IPV6_FL_S_EXCL ||
- fl1->share != fl->share ||
- ((fl1->share == IPV6_FL_S_PROCESS) &&
- (fl1->owner.pid == fl->owner.pid)) ||
- ((fl1->share == IPV6_FL_S_USER) &&
- uid_eq(fl1->owner.uid, fl->owner.uid)))
- goto release;
-
- err = -EINVAL;
- if (!ipv6_addr_equal(&fl1->dst, &fl->dst) ||
- ipv6_opt_cmp(fl1->opt, fl->opt))
- goto release;
-
- err = -ENOMEM;
- if (sfl1 == NULL)
- goto release;
- if (fl->linger > fl1->linger)
- fl1->linger = fl->linger;
- if ((long)(fl->expires - fl1->expires) > 0)
- fl1->expires = fl->expires;
- fl_link(np, sfl1, fl1);
- fl_free(fl);
- return 0;
+ err = -EEXIST;
+ if (freq->flr_flags&IPV6_FL_F_EXCL)
+ goto release;
+ err = -EPERM;
+ if (fl1->share == IPV6_FL_S_EXCL ||
+ fl1->share != fl->share ||
+ ((fl1->share == IPV6_FL_S_PROCESS) &&
+ (fl1->owner.pid != fl->owner.pid)) ||
+ ((fl1->share == IPV6_FL_S_USER) &&
+ !uid_eq(fl1->owner.uid, fl->owner.uid)))
+ goto release;
+
+ err = -ENOMEM;
+ if (!sfl1)
+ goto release;
+ if (fl->linger > fl1->linger)
+ fl1->linger = fl->linger;
+ if ((long)(fl->expires - fl1->expires) > 0)
+ fl1->expires = fl->expires;
+ fl_link(sk, sfl1, fl1);
+ fl_free(fl);
+ return 0;
release:
- fl_release(fl1);
- goto done;
- }
- }
- err = -ENOENT;
- if (!(freq.flr_flags&IPV6_FL_F_CREATE))
+ fl_release(fl1);
goto done;
+ }
+ }
+ err = -ENOENT;
+ if (!(freq->flr_flags & IPV6_FL_F_CREATE))
+ goto done;
- err = -ENOMEM;
- if (sfl1 == NULL || (err = mem_check(sk)) != 0)
- goto done;
+ err = -ENOMEM;
+ if (!sfl1)
+ goto done;
- fl1 = fl_intern(net, fl, freq.flr_label);
- if (fl1 != NULL)
- goto recheck;
+ err = mem_check(sk);
+ if (err != 0)
+ goto done;
- if (!freq.flr_label) {
- if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
- &fl->label, sizeof(fl->label))) {
- /* Intentionally ignore fault. */
- }
- }
+ fl1 = fl_intern(net, fl, freq->flr_label);
+ if (fl1)
+ goto recheck;
- fl_link(np, sfl1, fl);
- return 0;
+ if (!freq->flr_label) {
+ size_t offset = offsetof(struct in6_flowlabel_req, flr_label);
- default:
- return -EINVAL;
+ if (copy_to_sockptr_offset(optval, offset, &fl->label,
+ sizeof(fl->label))) {
+ /* Intentionally ignore fault. */
+ }
}
+ fl_link(sk, sfl1, fl);
+ return 0;
done:
fl_free(fl);
kfree(sfl1);
return err;
}
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen)
+{
+ struct in6_flowlabel_req freq;
+
+ if (optlen < sizeof(freq))
+ return -EINVAL;
+ if (copy_from_sockptr(&freq, optval, sizeof(freq)))
+ return -EFAULT;
+
+ switch (freq.flr_action) {
+ case IPV6_FL_A_PUT:
+ return ipv6_flowlabel_put(sk, &freq);
+ case IPV6_FL_A_RENEW:
+ return ipv6_flowlabel_renew(sk, &freq);
+ case IPV6_FL_A_GET:
+ return ipv6_flowlabel_get(sk, &freq, optval, optlen);
+ default:
+ return -EINVAL;
+ }
+}
+
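Note: ipv6_flowlabel_opt() now takes a sockptr_t rather than a char __user *, so the same code serves both user-space setsockopt() and in-kernel callers; copy_from_sockptr(), copy_from_sockptr_offset() and copy_to_sockptr_offset() pick the right copy routine for either case. Minimal usage sketch (user_buf and kernel_buf are illustrative):

	sockptr_t p = USER_SOCKPTR(user_buf);	/* or KERNEL_SOCKPTR(kernel_buf) */
	struct in6_flowlabel_req freq;

	if (copy_from_sockptr(&freq, p, sizeof(freq)))
		return -EFAULT;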
#ifdef CONFIG_PROC_FS
struct ip6fl_iter_state {
@@ -719,7 +806,11 @@ static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- rcu_read_lock_bh();
+ struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
+
+ state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb);
+
+ rcu_read_lock();
return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
@@ -738,16 +829,15 @@ static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void ip6fl_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
- rcu_read_unlock_bh();
+ rcu_read_unlock();
}
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
- if (v == SEQ_START_TOKEN)
- seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n",
- "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
- else {
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n");
+ } else {
struct ip6_flowlabel *fl = v;
seq_printf(seq,
"%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
@@ -774,45 +864,10 @@ static const struct seq_operations ip6fl_seq_ops = {
.show = ip6fl_seq_show,
};
-static int ip6fl_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- struct ip6fl_iter_state *state;
- int err;
-
- err = seq_open_net(inode, file, &ip6fl_seq_ops,
- sizeof(struct ip6fl_iter_state));
-
- if (!err) {
- seq = file->private_data;
- state = ip6fl_seq_private(seq);
- rcu_read_lock();
- state->pid_ns = get_pid_ns(task_active_pid_ns(current));
- rcu_read_unlock();
- }
- return err;
-}
-
-static int ip6fl_seq_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
- put_pid_ns(state->pid_ns);
- return seq_release_net(inode, file);
-}
-
-static const struct file_operations ip6fl_seq_fops = {
- .owner = THIS_MODULE,
- .open = ip6fl_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = ip6fl_seq_release,
-};
-
static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
- if (!proc_create("ip6_flowlabel", S_IRUGO, net->proc_net,
- &ip6fl_seq_fops))
+ if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
+ &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
return -ENOMEM;
return 0;
}
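Note: the removed seq_open_net()/seq_release_net() boilerplate is replaced by proc_create_net(), which allocates the per-open iterator state itself; the requirement is that the state struct begins with a struct seq_net_private so seq_file_net() works. Sketch with illustrative names (demo_iter_state mirrors ip6fl_iter_state):

struct demo_iter_state {
	struct seq_net_private p;	/* must come first for seq_file_net() */
	int bucket;
};

	if (!proc_create_net("demo", 0444, net->proc_net,
			     &demo_seq_ops, sizeof(struct demo_iter_state)))
		return -ENOMEM;

The pid namespace that used to be pinned in ip6fl_seq_open() is now looked up lazily in ip6fl_seq_start() via proc_pid_ns(), as the earlier hunk shows.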
@@ -849,6 +904,7 @@ int ip6_flowlabel_init(void)
void ip6_flowlabel_cleanup(void)
{
- del_timer(&ip6_fl_gc_timer);
+ static_key_deferred_flush(&ipv6_flowlabel_exclusive);
+ timer_delete(&ip6_fl_gc_timer);
unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}