Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_ct.c        5
-rw-r--r--  net/sched/act_mirred.c    6
-rw-r--r--  net/sched/cls_api.c       5
-rw-r--r--  net/sched/cls_flower.c    8
-rw-r--r--  net/sched/sch_api.c       2
-rw-r--r--  net/sched/sch_cbq.c      79
-rw-r--r--  net/sched/sch_generic.c  11
-rw-r--r--  net/sched/sch_taprio.c    5
8 files changed, 20 insertions, 101 deletions
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index e013253b10d1..d55afb8d14be 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -277,7 +277,7 @@ static struct nf_flowtable_type flowtable_ct = {
.owner = THIS_MODULE,
};
-static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
+static int tcf_ct_flow_table_get(struct net *net, struct tcf_ct_params *params)
{
struct tcf_ct_flow_table *ct_ft;
int err = -ENOMEM;
@@ -303,6 +303,7 @@ static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
err = nf_flow_table_init(&ct_ft->nf_ft);
if (err)
goto err_init;
+ write_pnet(&ct_ft->nf_ft.net, net);
__module_get(THIS_MODULE);
out_unlock:
@@ -1391,7 +1392,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
if (err)
goto cleanup;
- err = tcf_ct_flow_table_get(params);
+ err = tcf_ct_flow_table_get(net, params);
if (err)
goto cleanup;
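
The extra net argument lets act_ct record which network namespace owns the flow table: write_pnet() stores the struct net pointer in the nf_flowtable so it can later be recovered with read_pnet(). A minimal sketch of the store/load pairing, assuming nf_ft->net is the possible_net_t field the hunk above writes (the example_* names are illustrative, not from the patch):

    #include <net/net_namespace.h>
    #include <net/netfilter/nf_flow_table.h>

    /* Sketch only: record the owning namespace at init time ... */
    static int example_flow_table_setup(struct net *net, struct nf_flowtable *nf_ft)
    {
            int err = nf_flow_table_init(nf_ft);

            if (err)
                    return err;

            /* no-op storage without CONFIG_NET_NS, real pointer otherwise */
            write_pnet(&nf_ft->net, net);
            return 0;
    }

    /* ... and recover it wherever the flow table code needs the netns. */
    static struct net *example_flow_table_net(const struct nf_flowtable *nf_ft)
    {
            return read_pnet(&nf_ft->net);
    }
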
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index ebb92fb072ab..a1d70cf86843 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -79,7 +79,7 @@ static void tcf_mirred_release(struct tc_action *a)
/* last reference to action, no need to lock */
dev = rcu_dereference_protected(m->tcfm_dev, 1);
- dev_put_track(dev, &m->tcfm_dev_tracker);
+ netdev_put(dev, &m->tcfm_dev_tracker);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -181,7 +181,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
mac_header_xmit = dev_is_mac_header_xmit(ndev);
odev = rcu_replace_pointer(m->tcfm_dev, ndev,
lockdep_is_held(&m->tcf_lock));
- dev_put_track(odev, &m->tcfm_dev_tracker);
+ netdev_put(odev, &m->tcfm_dev_tracker);
netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
m->tcfm_mac_header_xmit = mac_header_xmit;
}
@@ -402,7 +402,7 @@ static int mirred_device_event(struct notifier_block *unused,
list_for_each_entry(m, &mirred_list, tcfm_list) {
spin_lock_bh(&m->tcf_lock);
if (tcf_mirred_dev_dereference(m) == dev) {
- dev_put_track(dev, &m->tcfm_dev_tracker);
+ netdev_put(dev, &m->tcfm_dev_tracker);
/* Note : no rcu grace period necessary, as
* net_device are already rcu protected.
*/
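
All three act_mirred hunks are the same mechanical conversion: dev_put_track() becomes netdev_put(), the renamed helper that pairs with netdev_hold()/netdev_tracker_alloc() and keeps reference-tracker diagnostics working. A hypothetical sketch of the hold/put pairing (example_holder and its fields are illustrative, not from the patch):

    #include <linux/netdevice.h>

    struct example_holder {
            struct net_device *dev;
            netdev_tracker dev_tracker;     /* ties each hold to its matching put */
    };

    static void example_take_dev(struct example_holder *h, struct net_device *dev)
    {
            h->dev = dev;
            netdev_hold(dev, &h->dev_tracker, GFP_ATOMIC);  /* was dev_hold_track() */
    }

    static void example_release_dev(struct example_holder *h)
    {
            netdev_put(h->dev, &h->dev_tracker);            /* was dev_put_track() */
            h->dev = NULL;
    }

The same rename appears again below in sch_api.c and sch_generic.c.
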
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ac366c99086f..790d6809be81 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -194,7 +194,7 @@ EXPORT_SYMBOL(register_tcf_proto_ops);
static struct workqueue_struct *tc_filter_wq;
-int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
+void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
int rc = -ENOENT;
@@ -214,7 +214,8 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
}
write_unlock(&cls_mod_lock);
- return rc;
+
+ WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
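
With unregister_tcf_proto_ops() returning void, callers can no longer check for failure; the helper instead reports a missing entry itself via WARN(). A sketch of what a classifier module's exit path looks like after this change (example_cls_ops and the "example" kind are placeholders):

    static struct tcf_proto_ops example_cls_ops __read_mostly = {
            .kind   = "example",
            .owner  = THIS_MODULE,
            /* .classify, .init, .destroy, ... elided */
    };

    static void __exit example_cls_exit(void)
    {
            /* No return code to propagate any more; an ops entry that was
             * never registered triggers the WARN() inside the helper.
             */
            unregister_tcf_proto_ops(&example_cls_ops);
    }
    module_exit(example_cls_exit);
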
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index dcca70144dff..1a1e34480b7e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -63,13 +63,7 @@ struct fl_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_ip enc_ip;
struct flow_dissector_key_enc_opts enc_opts;
- union {
- struct flow_dissector_key_ports tp;
- struct {
- struct flow_dissector_key_ports tp_min;
- struct flow_dissector_key_ports tp_max;
- };
- } tp_range;
+ struct flow_dissector_key_ports_range tp_range;
struct flow_dissector_key_ct ct;
struct flow_dissector_key_hash hash;
struct flow_dissector_key_num_of_vlans num_of_vlans;
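
The open-coded union of tp and the tp_min/tp_max pair moves out of fl_flow_key and into a shared flow-dissector type. For reference, the replacement presumably has the same shape as the union removed above (see include/net/flow_dissector.h for the authoritative definition):

    /* Assumed layout, mirroring the union this hunk removes. */
    struct flow_dissector_key_ports_range {
            union {
                    struct flow_dissector_key_ports tp;
                    struct {
                            struct flow_dissector_key_ports tp_min;
                            struct flow_dissector_key_ports tp_max;
                    };
            };
    };
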
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e3c0e8ea2dbb..bf87b50837a8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1292,7 +1292,7 @@ err_out5:
if (ops->destroy)
ops->destroy(sch);
err_out3:
- dev_put_track(dev, &sch->dev_tracker);
+ netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
module_put(ops->owner);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 02d9f0dfe356..599e26fc2fa8 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -149,7 +149,6 @@ struct cbq_sched_data {
psched_time_t now; /* Cached timestamp */
unsigned int pmask;
- struct hrtimer delay_timer;
struct qdisc_watchdog watchdog; /* Watchdog timer,
started when CBQ has
backlog, but cannot
@@ -441,81 +440,6 @@ static void cbq_overlimit(struct cbq_class *cl)
}
}
-static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
- psched_time_t now)
-{
- struct cbq_class *cl;
- struct cbq_class *cl_prev = q->active[prio];
- psched_time_t sched = now;
-
- if (cl_prev == NULL)
- return 0;
-
- do {
- cl = cl_prev->next_alive;
- if (now - cl->penalized > 0) {
- cl_prev->next_alive = cl->next_alive;
- cl->next_alive = NULL;
- cl->cpriority = cl->priority;
- cl->delayed = 0;
- cbq_activate_class(cl);
-
- if (cl == q->active[prio]) {
- q->active[prio] = cl_prev;
- if (cl == q->active[prio]) {
- q->active[prio] = NULL;
- return 0;
- }
- }
-
- cl = cl_prev->next_alive;
- } else if (sched - cl->penalized > 0)
- sched = cl->penalized;
- } while ((cl_prev = cl) != q->active[prio]);
-
- return sched - now;
-}
-
-static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
-{
- struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
- delay_timer);
- struct Qdisc *sch = q->watchdog.qdisc;
- psched_time_t now;
- psched_tdiff_t delay = 0;
- unsigned int pmask;
-
- now = psched_get_time();
-
- pmask = q->pmask;
- q->pmask = 0;
-
- while (pmask) {
- int prio = ffz(~pmask);
- psched_tdiff_t tmp;
-
- pmask &= ~(1<<prio);
-
- tmp = cbq_undelay_prio(q, prio, now);
- if (tmp > 0) {
- q->pmask |= 1<<prio;
- if (tmp < delay || delay == 0)
- delay = tmp;
- }
- }
-
- if (delay) {
- ktime_t time;
-
- time = 0;
- time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
- hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
- }
-
- __netif_schedule(qdisc_root(sch));
- return HRTIMER_NORESTART;
-}
-
/*
* It is mission critical procedure.
*
@@ -1034,7 +958,6 @@ cbq_reset(struct Qdisc *sch)
q->tx_class = NULL;
q->tx_borrowed = NULL;
qdisc_watchdog_cancel(&q->watchdog);
- hrtimer_cancel(&q->delay_timer);
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
@@ -1162,8 +1085,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
int err;
qdisc_watchdog_init(&q->watchdog, sch);
- hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- q->delay_timer.function = cbq_undelay;
err = cbq_opt_parse(tb, opt, extack);
if (err < 0)
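
With delay_timer and cbq_undelay() removed, CBQ is left with just the qdisc_watchdog it already initialises for rearming the queue. A minimal, hypothetical sketch of that remaining pattern (example_sched_data stands in for a qdisc's private data):

    #include <net/pkt_sched.h>

    struct example_sched_data {
            struct qdisc_watchdog watchdog;
            /* ... other per-qdisc state ... */
    };

    static int example_init(struct Qdisc *sch)
    {
            struct example_sched_data *q = qdisc_priv(sch);

            /* One shared watchdog per qdisc; no private hrtimer needed. */
            qdisc_watchdog_init(&q->watchdog, sch);
            return 0;
    }

    static void example_defer(struct example_sched_data *q, u64 expires_ns)
    {
            /* Re-run the qdisc once the absolute time expires_ns is reached;
             * qdisc_watchdog_cancel() in ->reset()/->destroy() is the only
             * teardown required.
             */
            qdisc_watchdog_schedule_ns(&q->watchdog, expires_ns);
    }
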
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index dba0b3e24af5..cc6eabee2830 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -541,7 +541,7 @@ static void dev_watchdog(struct timer_list *t)
spin_unlock(&dev->tx_global_lock);
if (release)
- dev_put_track(dev, &dev->watchdog_dev_tracker);
+ netdev_put(dev, &dev->watchdog_dev_tracker);
}
void __netdev_watchdog_up(struct net_device *dev)
@@ -551,7 +551,8 @@ void __netdev_watchdog_up(struct net_device *dev)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies + dev->watchdog_timeo)))
- dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
+ netdev_hold(dev, &dev->watchdog_dev_tracker,
+ GFP_ATOMIC);
}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
@@ -565,7 +566,7 @@ static void dev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
- dev_put_track(dev, &dev->watchdog_dev_tracker);
+ netdev_put(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
@@ -975,7 +976,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
- dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL);
+ netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
@@ -1067,7 +1068,7 @@ static void qdisc_destroy(struct Qdisc *qdisc)
ops->destroy(qdisc);
module_put(ops->owner);
- dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker);
+ netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index b9c71a304d39..0b941dd63d26 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/time.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -176,7 +177,7 @@ static ktime_t get_interval_end_time(struct sched_gate_list *sched,
static int length_to_duration(struct taprio_sched *q, int len)
{
- return div_u64(len * atomic64_read(&q->picos_per_byte), 1000);
+ return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
}
/* Returns the entry corresponding to next available interval. If
@@ -551,7 +552,7 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch)
static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
{
atomic_set(&entry->budget,
- div64_u64((u64)entry->interval * 1000,
+ div64_u64((u64)entry->interval * PSEC_PER_NSEC,
atomic64_read(&q->picos_per_byte)));
}