Diffstat (limited to 'net/sched/sch_hfsc.c')
 net/sched/sch_hfsc.c (-rw-r--r--) | 294
 1 file changed, 145 insertions(+), 149 deletions(-)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index b52f74610dc7..d8fd35da32a7 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -110,14 +110,12 @@ enum hfsc_class_flags {
 
 struct hfsc_class {
 	struct Qdisc_class_common cl_common;
-	unsigned int	refcnt;		/* usage count */
 
-	struct gnet_stats_basic_packed bstats;
+	struct gnet_stats_basic_sync bstats;
 	struct gnet_stats_queue qstats;
 	struct net_rate_estimator __rcu *rate_est;
 	struct tcf_proto __rcu *filter_list; /* filter list */
 	struct tcf_block *block;
-	unsigned int	filter_cnt;	/* filter count */
 	unsigned int	level;		/* class level in hierarchy */
 
 	struct hfsc_sched *sched;	/* scheduler data */
@@ -177,6 +175,11 @@ struct hfsc_sched {
 
 #define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
 
+static bool cl_in_el_or_vttree(struct hfsc_class *cl)
+{
+	return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
+		((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
+}
 
 /*
  * eligible tree holds backlogged classes being sorted by their eligible times.
@@ -205,7 +208,10 @@ eltree_insert(struct hfsc_class *cl)
 static inline void
 eltree_remove(struct hfsc_class *cl)
 {
-	rb_erase(&cl->el_node, &cl->sched->eligible);
+	if (!RB_EMPTY_NODE(&cl->el_node)) {
+		rb_erase(&cl->el_node, &cl->sched->eligible);
+		RB_CLEAR_NODE(&cl->el_node);
+	}
 }
 
 static inline void
@@ -830,54 +836,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
 }
 
 static void
-set_active(struct hfsc_class *cl, unsigned int len)
-{
-	if (cl->cl_flags & HFSC_RSC)
-		init_ed(cl, len);
-	if (cl->cl_flags & HFSC_FSC)
-		init_vf(cl, len);
-
-}
-
-static void
-set_passive(struct hfsc_class *cl)
-{
-	if (cl->cl_flags & HFSC_RSC)
-		eltree_remove(cl);
-
-	/*
-	 * vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
-	 * needs to be called explicitly to remove a class from vttree.
-	 */
-}
-
-static unsigned int
-qdisc_peek_len(struct Qdisc *sch)
-{
-	struct sk_buff *skb;
-	unsigned int len;
-
-	skb = sch->ops->peek(sch);
-	if (unlikely(skb == NULL)) {
-		qdisc_warn_nonwc("qdisc_peek_len", sch);
-		return 0;
-	}
-	len = qdisc_pkt_len(skb);
-
-	return len;
-}
-
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
-static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
 	struct hfsc_class *p;
@@ -936,6 +894,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
 	cl->cl_flags |= HFSC_USC;
 }
 
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+	cl->cl_fsc = cl->cl_rsc;
+	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+	cl->cl_flags |= HFSC_FSC;
+}
+
 static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
 	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
 	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
@@ -944,7 +910,8 @@
 
 static int
 hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
-		  struct nlattr **tca, unsigned long *arg)
+		  struct nlattr **tca, unsigned long *arg,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)*arg;
@@ -958,7 +925,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (opt == NULL)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_HFSC_MAX, opt, hfsc_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_HFSC_MAX, opt, hfsc_policy,
+					  NULL);
 	if (err < 0)
 		return err;
@@ -981,6 +949,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	}
 
 	if (cl != NULL) {
+		int old_flags;
+		int len = 0;
+
 		if (parentid) {
 			if (cl->cl_parent &&
 			    cl->cl_parent->cl_common.classid != parentid)
@@ -994,13 +965,15 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 			err = gen_replace_estimator(&cl->bstats, NULL,
 						    &cl->rate_est,
 						    NULL,
-						    qdisc_root_sleeping_running(sch),
+						    true,
 						    tca[TCA_RATE]);
 			if (err)
 				return err;
 		}
 
 		sch_tree_lock(sch);
+		old_flags = cl->cl_flags;
+
 		if (rsc != NULL)
 			hfsc_change_rsc(cl, rsc, cur_time);
 		if (fsc != NULL)
@@ -1008,11 +981,26 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		if (usc != NULL)
 			hfsc_change_usc(cl, usc, cur_time);
 
+		if (cl->qdisc->q.qlen != 0)
+			len = qdisc_peek_len(cl->qdisc);
+		/* Check queue length again since some qdisc implementations
+		 * (e.g., netem/codel) might empty the queue during the peek
+		 * operation.
+		 */
 		if (cl->qdisc->q.qlen != 0) {
-			if (cl->cl_flags & HFSC_RSC)
-				update_ed(cl, qdisc_peek_len(cl->qdisc));
-			if (cl->cl_flags & HFSC_FSC)
-				update_vf(cl, 0, cur_time);
+			if (cl->cl_flags & HFSC_RSC) {
+				if (old_flags & HFSC_RSC)
+					update_ed(cl, len);
+				else
+					init_ed(cl, len);
+			}
+
+			if (cl->cl_flags & HFSC_FSC) {
+				if (old_flags & HFSC_FSC)
+					update_vf(cl, 0, cur_time);
+				else
+					init_vf(cl, len);
+			}
 		}
 
 		sch_tree_unlock(sch);
@@ -1041,7 +1029,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (cl == NULL)
 		return -ENOBUFS;
 
-	err = tcf_block_get(&cl->block, &cl->filter_list);
+	RB_CLEAR_NODE(&cl->el_node);
+
+	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 	if (err) {
 		kfree(cl);
 		return err;
@@ -1049,9 +1039,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE]) {
 		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-					NULL,
-					qdisc_root_sleeping_running(sch),
-					tca[TCA_RATE]);
+					NULL, true, tca[TCA_RATE]);
 		if (err) {
 			tcf_block_put(cl->block);
 			kfree(cl);
@@ -1067,11 +1055,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		hfsc_change_usc(cl, usc, 0);
 
 	cl->cl_common.classid = classid;
-	cl->refcnt    = 1;
 	cl->sched     = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
-				      &pfifo_qdisc_ops, classid);
+	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
+				      classid, NULL);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
 	else
@@ -1081,10 +1068,16 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->cf_tree = RB_ROOT;
 
 	sch_tree_lock(sch);
+	/* Check if the inner class is a misconfigured 'rt' */
+	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+		NL_SET_ERR_MSG(extack,
+			       "Forced curve change on parent 'rt' to 'sc'");
+		hfsc_upgrade_rt(parent);
+	}
 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
 	list_add_tail(&cl->siblings, &parent->children);
 	if (parent->level == 0)
-		hfsc_purge_queue(sch, parent);
+		qdisc_purge_queue(parent->qdisc);
 	hfsc_adjust_levels(parent);
 	sch_tree_unlock(sch);
@@ -1100,36 +1093,36 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
 	struct hfsc_sched *q = qdisc_priv(sch);
 
 	tcf_block_put(cl->block);
-	qdisc_destroy(cl->qdisc);
+	qdisc_put(cl->qdisc);
 	gen_kill_estimator(&cl->rate_est);
 	if (cl != &q->root)
 		kfree(cl);
 }
 
 static int
-hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
+hfsc_delete_class(struct Qdisc *sch, unsigned long arg,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 
-	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
+	if (cl->level > 0 || qdisc_class_in_use(&cl->cl_common) ||
+	    cl == &q->root) {
+		NL_SET_ERR_MSG(extack, "HFSC class in use");
 		return -EBUSY;
+	}
 
 	sch_tree_lock(sch);
 
 	list_del(&cl->siblings);
 	hfsc_adjust_levels(cl->cl_parent);
 
-	hfsc_purge_queue(sch, cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
-	BUG_ON(--cl->refcnt == 0);
-	/*
-	 * This shouldn't happen: we "hold" one cops->get() when called
-	 * from tc_ctl_tclass; the destroy method is done from cops->put().
-	 */
-
 	sch_tree_unlock(sch);
+
+	hfsc_destroy_class(sch, cl);
 	return 0;
 }
@@ -1150,13 +1143,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	head = &q->root;
 	tcf = rcu_dereference_bh(q->root.filter_list);
-	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
+	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 		case TC_ACT_TRAP:
 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
+			fallthrough;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1179,7 +1173,8 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	}
 
 	/* classification failed, try default class */
-	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
+	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle),
+				       READ_ONCE(q->defcls)), sch);
 	if (cl == NULL || cl->level > 0)
 		return NULL;
@@ -1188,7 +1183,7 @@
 
 static int
 hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
-		 struct Qdisc **old)
+		 struct Qdisc **old, struct netlink_ext_ack *extack)
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1196,7 +1191,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		return -EINVAL;
 	if (new == NULL) {
 		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					cl->cl_common.classid);
+					cl->cl_common.classid, NULL);
 		if (new == NULL)
 			new = &noop_qdisc;
 	}
@@ -1221,30 +1216,19 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 
-	if (cl->qdisc->q.qlen == 0) {
+	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
+	 * needs to be called explicitly to remove a class from vttree.
+	 */
+	if (cl->cl_nactive)
 		update_vf(cl, 0, 0);
-		set_passive(cl);
-	}
+	if (cl->cl_flags & HFSC_RSC)
+		eltree_remove(cl);
 }
 
 static unsigned long
-hfsc_get_class(struct Qdisc *sch, u32 classid)
+hfsc_search_class(struct Qdisc *sch, u32 classid)
 {
-	struct hfsc_class *cl = hfsc_find_class(classid, sch);
-
-	if (cl != NULL)
-		cl->refcnt++;
-
-	return (unsigned long)cl;
-}
-
-static void
-hfsc_put_class(struct Qdisc *sch, unsigned long arg)
-{
-	struct hfsc_class *cl = (struct hfsc_class *)arg;
-
-	if (--cl->refcnt == 0)
-		hfsc_destroy_class(sch, cl);
+	return (unsigned long)hfsc_find_class(classid, sch);
 }
 
 static unsigned long
@@ -1256,7 +1240,7 @@ hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
 	if (cl != NULL) {
 		if (p != NULL && p->level <= cl->level)
 			return 0;
-		cl->filter_cnt++;
+		qdisc_class_get(&cl->cl_common);
 	}
 
 	return (unsigned long)cl;
@@ -1267,10 +1251,11 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 
-	cl->filter_cnt--;
+	qdisc_class_put(&cl->cl_common);
 }
 
-static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg)
+static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
+					struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
@@ -1332,7 +1317,7 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
 	if (cl->level == 0)
 		tcm->tcm_info = cl->qdisc->handle;
 
-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
 	if (hfsc_dump_curves(skb, cl) < 0)
@@ -1350,16 +1335,17 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 	struct tc_hfsc_stats xstats;
+	__u32 qlen;
 
-	cl->qstats.backlog = cl->qdisc->qstats.backlog;
+	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
 	xstats.level   = cl->level;
 	xstats.period  = cl->cl_vtperiod;
 	xstats.work    = cl->cl_total;
 	xstats.rtwork  = cl->cl_cumul;
 
-	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
+	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -1380,15 +1366,8 @@ hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i],
 				     cl_common.hnode) {
-			if (arg->count < arg->skip) {
-				arg->count++;
-				continue;
-			}
-			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
-				arg->stop = 1;
+			if (!tc_qdisc_stats_dump(sch, (unsigned long)cl, arg))
 				return;
-			}
-			arg->count++;
 		}
 	}
 }
@@ -1407,18 +1386,21 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
 		if (next_time == 0 || next_time > q->root.cl_cfmin)
 			next_time = q->root.cl_cfmin;
 	}
-	WARN_ON(next_time == 0);
-	qdisc_watchdog_schedule(&q->watchdog, next_time);
+	if (next_time)
+		qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
-hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
 	int err;
 
-	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+	qdisc_watchdog_init(&q->watchdog, sch);
+
+	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 	qopt = nla_data(opt);
@@ -1428,11 +1410,15 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 	q->eligible = RB_ROOT;
 
+	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
+	if (err)
+		return err;
+
+	gnet_stats_basic_sync_init(&q->root.bstats);
 	q->root.cl_common.classid = sch->handle;
-	q->root.refcnt  = 1;
 	q->root.sched   = q;
 	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
-					  sch->handle);
+					  sch->handle, NULL);
 	if (q->root.qdisc == NULL)
 		q->root.qdisc = &noop_qdisc;
 	else
@@ -1444,24 +1430,21 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
 	qdisc_class_hash_grow(sch, &q->clhash);
 
-	qdisc_watchdog_init(&q->watchdog, sch);
-
 	return 0;
 }
 
 static int
-hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt)
+hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
+		  struct netlink_ext_ack *extack)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
 	struct tc_hfsc_qopt *qopt;
 
-	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
+	if (nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 	qopt = nla_data(opt);
 
-	sch_tree_lock(sch);
-	q->defcls = qopt->defcls;
-	sch_tree_unlock(sch);
+	WRITE_ONCE(q->defcls, qopt->defcls);
 
 	return 0;
 }
@@ -1509,8 +1492,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 	}
 	q->eligible = RB_ROOT;
 	qdisc_watchdog_cancel(&q->watchdog);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void
@@ -1522,8 +1503,10 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
 	unsigned int i;
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
-		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
+		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
 			tcf_block_put(cl->block);
+			cl->block = NULL;
+		}
 	}
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
@@ -1541,7 +1524,7 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_hfsc_qopt qopt;
 
-	qopt.defcls = q->defcls;
+	qopt.defcls = READ_ONCE(q->defcls);
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 		goto nla_put_failure;
 	return skb->len;
@@ -1554,8 +1537,10 @@
 static int
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct hfsc_class *cl;
-	int uninitialized_var(err);
+	int err;
+	bool first;
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -1565,6 +1550,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -1574,8 +1560,14 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
-	if (cl->qdisc->q.qlen == 1) {
-		set_active(cl, qdisc_pkt_len(skb));
+	sch->qstats.backlog += len;
+	sch->q.qlen++;
+
+	if (first && !cl_in_el_or_vttree(cl)) {
+		if (cl->cl_flags & HFSC_RSC)
+			init_ed(cl, len);
+		if (cl->cl_flags & HFSC_FSC)
+			init_vf(cl, len);
 
 		/*
 		 * If this is the first packet, isolate the head so an eventual
 		 * head drop before the first dequeue operation has no chance
@@ -1586,9 +1578,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
-	sch->q.qlen++;
-
 	return NET_XMIT_SUCCESS;
 }
@@ -1639,18 +1628,24 @@ hfsc_dequeue(struct Qdisc *sch)
 	if (realtime)
 		cl->cl_cumul += qdisc_pkt_len(skb);
 
-	if (cl->qdisc->q.qlen != 0) {
-		if (cl->cl_flags & HFSC_RSC) {
+	if (cl->cl_flags & HFSC_RSC) {
+		if (cl->qdisc->q.qlen != 0) {
 			/* update ed */
 			next_len = qdisc_peek_len(cl->qdisc);
-			if (realtime)
-				update_ed(cl, next_len);
-			else
-				update_d(cl, next_len);
+			/* Check queue length again since some qdisc implementations
+			 * (e.g., netem/codel) might empty the queue during the peek
+			 * operation.
+			 */
+			if (cl->qdisc->q.qlen != 0) {
+				if (realtime)
+					update_ed(cl, next_len);
+				else
+					update_d(cl, next_len);
+			}
+		} else {
+			/* the class becomes passive */
+			eltree_remove(cl);
 		}
-	} else {
-		/* the class becomes passive */
-		set_passive(cl);
 	}
 
 	qdisc_bstats_update(sch, skb);
@@ -1666,8 +1661,7 @@ static const struct Qdisc_class_ops hfsc_class_ops = {
 	.graft		= hfsc_graft_class,
 	.leaf		= hfsc_class_leaf,
 	.qlen_notify	= hfsc_qlen_notify,
-	.get		= hfsc_get_class,
-	.put		= hfsc_put_class,
+	.find		= hfsc_search_class,
 	.bind_tcf	= hfsc_bind_tcf,
 	.unbind_tcf	= hfsc_unbind_tcf,
 	.tcf_block	= hfsc_tcf_block,
@@ -1690,6 +1684,7 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
 	.priv_size	= sizeof(struct hfsc_sched),
 	.owner		= THIS_MODULE
 };
+MODULE_ALIAS_NET_SCH("hfsc");
 
 static int __init
 hfsc_init(void)
@@ -1704,5 +1699,6 @@ hfsc_cleanup(void)
 }
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hierarchical Fair Service Curve scheduler");
 module_init(hfsc_init);
 module_exit(hfsc_cleanup);
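The eltree_remove() and cl_in_el_or_vttree() changes above rely on the kernel's rbtree sentinel convention: RB_CLEAR_NODE() makes a node's parent link point at itself, and RB_EMPTY_NODE() tests for that, so "is this node linked?" is an O(1) check and removal can be made idempotent. This is also why the new-class path calls RB_CLEAR_NODE(&cl->el_node) right after allocation, before the first insert or remove. A minimal userspace sketch of the pattern follows; the node_clear/node_empty/tree_remove names are illustrative stand-ins, not kernel API.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
            struct node *parent;    /* points to itself when not in a tree */
    };

    /* analogues of RB_CLEAR_NODE() / RB_EMPTY_NODE() */
    static void node_clear(struct node *n)       { n->parent = n; }
    static int  node_empty(const struct node *n) { return n->parent == n; }

    /* stand-in for the patched eltree_remove(): unlink, then re-mark */
    static void tree_remove(struct node *n)
    {
            if (node_empty(n))      /* already unlinked: safe no-op */
                    return;
            /* ... the real rb_erase() unlink would happen here ... */
            node_clear(n);
    }

    int main(void)
    {
            struct node n;

            node_clear(&n);         /* like RB_CLEAR_NODE() at class creation */
            n.parent = NULL;        /* pretend an insert linked it as root */

            tree_remove(&n);        /* first removal unlinks */
            tree_remove(&n);        /* second removal is now harmless */
            assert(node_empty(&n));
            puts("double remove is safe");
            return 0;
    }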
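Both hfsc_change_class() and hfsc_dequeue() now re-read cl->qdisc->q.qlen after qdisc_peek_len(), because a child qdisc's peek operation can itself drop packets (netem and codel are the examples the comments name) and leave the queue empty behind the first check. A small sketch of that re-check pattern, with a hypothetical queue type standing in for the child qdisc:

    #include <stdio.h>

    /* toy model of a child qdisc whose peek may drop packets */
    struct queue {
            int qlen;
            int head_len;
            int drops_on_peek;
    };

    static int queue_peek_len(struct queue *q)
    {
            if (q->drops_on_peek >= q->qlen) {
                    q->qlen = 0;    /* the peek emptied the queue */
                    return 0;
            }
            q->qlen -= q->drops_on_peek;
            return q->head_len;
    }

    static void service_class(struct queue *q)
    {
            if (q->qlen != 0) {
                    int next_len = queue_peek_len(q);

                    /* check again: the peek may have emptied the queue */
                    if (q->qlen != 0)
                            printf("update deadline with len %d\n", next_len);
                    else
                            printf("queue drained during peek, go passive\n");
            }
    }

    int main(void)
    {
            struct queue q = { .qlen = 2, .head_len = 1500, .drops_on_peek = 2 };

            service_class(&q);      /* takes the "drained during peek" branch */
            return 0;
    }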
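Finally, hfsc_change_qdisc() drops the sch_tree_lock()/sch_tree_unlock() pair around the q->defcls update: defcls is a single __u16 that hfsc_classify() reads once per packet, so marked accesses via READ_ONCE()/WRITE_ONCE() are enough to keep the reader and writer from tearing or caching the value. A userspace sketch using C11 relaxed atomics in place of those macros; struct sched_priv and the function names here are illustrative, not the kernel's types:

    #include <stdatomic.h>
    #include <stdio.h>

    struct sched_priv {
            _Atomic unsigned short defcls;
    };

    /* analogue of WRITE_ONCE(q->defcls, qopt->defcls): one marked store */
    static void change_qdisc(struct sched_priv *q, unsigned short defcls)
    {
            atomic_store_explicit(&q->defcls, defcls, memory_order_relaxed);
    }

    /* analogue of READ_ONCE(q->defcls) on the classify fast path */
    static unsigned short classify_default(struct sched_priv *q)
    {
            return atomic_load_explicit(&q->defcls, memory_order_relaxed);
    }

    int main(void)
    {
            struct sched_priv q;

            change_qdisc(&q, 0x10);
            printf("default class minor: 0x%x\n", (unsigned)classify_default(&q));
            return 0;
    }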