Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c        |   8
-rw-r--r--  net/sched/act_csum.c       |   3
-rw-r--r--  net/sched/act_mirred.c     |   2
-rw-r--r--  net/sched/act_mpls.c       |   2
-rw-r--r--  net/sched/act_pedit.c      |  85
-rw-r--r--  net/sched/act_tunnel_key.c |   5
-rw-r--r--  net/sched/cls_api.c        |   1
-rw-r--r--  net/sched/cls_flower.c     |   2
-rw-r--r--  net/sched/em_meta.c        |   2
-rw-r--r--  net/sched/sch_api.c        |   6
-rw-r--r--  net/sched/sch_cake.c       |   6
-rw-r--r--  net/sched/sch_fq.c         |   6
-rw-r--r--  net/sched/sch_generic.c    |  10
-rw-r--r--  net/sched/sch_htb.c        |  17
-rw-r--r--  net/sched/sch_mqprio.c     | 196
-rw-r--r--  net/sched/sch_mqprio_lib.c |  14
-rw-r--r--  net/sched/sch_mqprio_lib.h |   2
-rw-r--r--  net/sched/sch_pie.c        |   2
-rw-r--r--  net/sched/sch_qfq.c        |  34
-rw-r--r--  net/sched/sch_taprio.c     |  77

20 files changed, 345 insertions(+), 135 deletions(-)
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 296fc1afedd8..f7887f42d542 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -453,7 +453,7 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
 		+ nla_total_size_64bit(sizeof(u64))
 		/* TCA_STATS_QUEUE */
 		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
-		+ nla_total_size(0) /* TCA_OPTIONS nested */
+		+ nla_total_size(0) /* TCA_ACT_OPTIONS nested */
 		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
 }
 
@@ -480,7 +480,7 @@ tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tc_cookie *cookie;
 
-	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
+	if (nla_put_string(skb, TCA_ACT_KIND, a->ops->kind))
 		goto nla_put_failure;
 	if (tcf_action_copy_stats(skb, a, 0))
 		goto nla_put_failure;
@@ -598,7 +598,7 @@ static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	nest = nla_nest_start_noflag(skb, 0);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (nla_put_string(skb, TCA_KIND, ops->kind))
+	if (nla_put_string(skb, TCA_ACT_KIND, ops->kind))
 		goto nla_put_failure;
 
 	ret = 0;
@@ -1189,7 +1189,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
 	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
 		goto nla_put_failure;
 
-	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
 	err = tcf_action_dump_old(skb, a, bind, ref);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 95e9304024b7..8ed285023a40 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -376,8 +376,7 @@ static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
 	sctph->checksum = sctp_compute_cksum(skb, skb_network_offset(skb) + ihl);
 
-	skb->ip_summed = CHECKSUM_NONE;
-	skb->csum_not_inet = 0;
+	skb_reset_csum_not_inet(skb);
 
 	return 1;
 }
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 8037ec9b1d31..ec43764e92e7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -295,7 +295,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
 	at_nh = skb->data == skb_network_header(skb);
 	if (at_nh != expects_nh) {
 		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
-			  skb_network_header(skb) - skb_mac_header(skb);
+			  skb_network_offset(skb);
 		if (expects_nh) {
 			/* target device/action expect data at nh */
 			skb_pull_rcsum(skb2, mac_len);
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 809f7928a1be..1010dc632874 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -69,7 +69,7 @@ TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb,
 		skb_push_rcsum(skb, skb->mac_len);
 		mac_len = skb->mac_len;
 	} else {
-		mac_len = skb_network_header(skb) - skb_mac_header(skb);
+		mac_len = skb_network_offset(skb);
 	}
 
 	ret = READ_ONCE(m->tcf_action);
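The act_mirred and act_mpls hunks above replace open-coded header arithmetic with the existing skb_network_offset() helper, which is defined as the distance from skb->data to the network header. The two expressions agree in these branches because skb->data is known to sit at the mac header there. A standalone sketch (plain C, not kernel code) of the equivalence:

#include <stdio.h>

int main(void)
{
	unsigned char pkt[64];
	unsigned char *data = pkt;	/* egress: skb->data == mac header */
	unsigned char *mac = pkt;
	unsigned char *net = pkt + 14;	/* Ethernet header length */

	/* open-coded "network - mac" vs helper-style "network - data" */
	printf("open-coded=%td helper=%td\n", net - mac, net - data);
	return 0;
}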
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 4559a1507ea5..fb93d4c1faca 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -30,12 +30,13 @@ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
 };
 
 static const struct nla_policy pedit_key_ex_policy[TCA_PEDIT_KEY_EX_MAX + 1] = {
-	[TCA_PEDIT_KEY_EX_HTYPE] = { .type = NLA_U16 },
-	[TCA_PEDIT_KEY_EX_CMD] = { .type = NLA_U16 },
+	[TCA_PEDIT_KEY_EX_HTYPE] =
+		NLA_POLICY_MAX(NLA_U16, TCA_PEDIT_HDR_TYPE_MAX),
+	[TCA_PEDIT_KEY_EX_CMD] = NLA_POLICY_MAX(NLA_U16, TCA_PEDIT_CMD_MAX),
 };
 
 static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
-							u8 n)
+							u8 n, struct netlink_ext_ack *extack)
 {
 	struct tcf_pedit_key_ex *keys_ex;
 	struct tcf_pedit_key_ex *k;
@@ -56,12 +57,14 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 		struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1];
 
 		if (!n) {
+			NL_SET_ERR_MSG_MOD(extack, "Can't parse more extended keys than requested");
 			err = -EINVAL;
 			goto err_out;
 		}
 		n--;
 
 		if (nla_type(ka) != TCA_PEDIT_KEY_EX) {
+			NL_SET_ERR_MSG_ATTR(extack, ka, "Unknown attribute, expected extended key");
 			err = -EINVAL;
 			goto err_out;
 		}
@@ -72,25 +75,26 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
 		if (err)
 			goto err_out;
 
-		if (!tb[TCA_PEDIT_KEY_EX_HTYPE] ||
-		    !tb[TCA_PEDIT_KEY_EX_CMD]) {
+		if (NL_REQ_ATTR_CHECK(extack, nla, tb, TCA_PEDIT_KEY_EX_HTYPE)) {
+			NL_SET_ERR_MSG(extack, "Missing required attribute");
 			err = -EINVAL;
 			goto err_out;
 		}
 
-		k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
-		k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);
-
-		if (k->htype > TCA_PEDIT_HDR_TYPE_MAX ||
-		    k->cmd > TCA_PEDIT_CMD_MAX) {
+		if (NL_REQ_ATTR_CHECK(extack, nla, tb, TCA_PEDIT_KEY_EX_CMD)) {
+			NL_SET_ERR_MSG(extack, "Missing required attribute");
 			err = -EINVAL;
 			goto err_out;
 		}
 
+		k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
+		k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);
+
 		k++;
 	}
 
 	if (n) {
+		NL_SET_ERR_MSG_MOD(extack, "Not enough extended keys to parse");
 		err = -EINVAL;
 		goto err_out;
 	}
@@ -222,7 +226,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	}
 
 	nparms->tcfp_keys_ex =
-		tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
+		tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys, extack);
 	if (IS_ERR(nparms->tcfp_keys_ex)) {
 		ret = PTR_ERR(nparms->tcfp_keys_ex);
 		goto out_free;
@@ -247,8 +251,16 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 	memcpy(nparms->tcfp_keys, parm->keys, ksize);
 
 	for (i = 0; i < nparms->tcfp_nkeys; ++i) {
+		u32 offmask = nparms->tcfp_keys[i].offmask;
 		u32 cur = nparms->tcfp_keys[i].off;
 
+		/* The AT option can be added to static offsets in the datapath */
+		if (!offmask && cur % 4) {
+			NL_SET_ERR_MSG_MOD(extack, "Offsets must be on 32bit boundaries");
+			ret = -EINVAL;
+			goto put_chain;
+		}
+
 		/* sanitize the shift value for any later use */
 		nparms->tcfp_keys[i].shift = min_t(size_t,
						   BITS_PER_TYPE(int) - 1,
						   nparms->tcfp_keys[i].shift);
@@ -257,7 +269,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
 		/* The AT option can read a single byte, we can bound the actual
 		 * value with uchar max.
 		 */
-		cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift;
+		cur += (0xff & offmask) >> nparms->tcfp_keys[i].shift;
 
 		/* Each key touches 4 bytes starting from the computed offset */
 		nparms->tcfp_off_max_hint =
			max(nparms->tcfp_off_max_hint, cur + 4);
@@ -313,37 +325,28 @@ static bool offset_valid(struct sk_buff *skb, int offset)
 	return true;
 }
 
-static int pedit_skb_hdr_offset(struct sk_buff *skb,
-				enum pedit_header_type htype, int *hoffset)
+static void pedit_skb_hdr_offset(struct sk_buff *skb,
+				 enum pedit_header_type htype, int *hoffset)
 {
-	int ret = -EINVAL;
-
+	/* 'htype' is validated in the netlink parsing */
 	switch (htype) {
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
-		if (skb_mac_header_was_set(skb)) {
+		if (skb_mac_header_was_set(skb))
 			*hoffset = skb_mac_offset(skb);
-			ret = 0;
-		}
 		break;
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
 		*hoffset = skb_network_offset(skb);
-		ret = 0;
 		break;
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
 	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
-		if (skb_transport_header_was_set(skb)) {
+		if (skb_transport_header_was_set(skb))
 			*hoffset = skb_transport_offset(skb);
-			ret = 0;
-		}
 		break;
 	default:
-		ret = -EINVAL;
 		break;
 	}
-
-	return ret;
 }
 
 TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
@@ -376,10 +379,9 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
 	for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) {
 		int offset = tkey->off;
+		int hoffset = 0;
 		u32 *ptr, hdata;
-		int hoffset;
 		u32 val;
-		int rc;
 
 		if (tkey_ex) {
 			htype = tkey_ex->htype;
@@ -388,36 +390,30 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
 			tkey_ex++;
 		}
 
-		rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
-		if (rc) {
-			pr_info("tc action pedit bad header type specified (0x%x)\n",
-				htype);
-			goto bad;
-		}
+		pedit_skb_hdr_offset(skb, htype, &hoffset);
 
 		if (tkey->offmask) {
 			u8 *d, _d;
 
 			if (!offset_valid(skb, hoffset + tkey->at)) {
-				pr_info("tc action pedit 'at' offset %d out of bounds\n",
-					hoffset + tkey->at);
+				pr_info_ratelimited("tc action pedit 'at' offset %d out of bounds\n",
						    hoffset + tkey->at);
 				goto bad;
 			}
 			d = skb_header_pointer(skb, hoffset + tkey->at,
					       sizeof(_d), &_d);
 			if (!d)
 				goto bad;
-			offset += (*d & tkey->offmask) >> tkey->shift;
-		}
-
-		if (offset % 4) {
-			pr_info("tc action pedit offset must be on 32 bit boundaries\n");
-			goto bad;
+			offset += (*d & tkey->offmask) >> tkey->shift;
+			if (offset % 4) {
+				pr_info_ratelimited("tc action pedit offset must be on 32 bit boundaries\n");
+				goto bad;
+			}
 		}
 
 		if (!offset_valid(skb, hoffset + offset)) {
-			pr_info("tc action pedit offset %d out of bounds\n",
-				hoffset + offset);
+			pr_info_ratelimited("tc action pedit offset %d out of bounds\n", hoffset + offset);
 			goto bad;
 		}
 
@@ -434,8 +430,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb,
 			val = (*ptr + tkey->val) & ~tkey->mask;
 			break;
 		default:
-			pr_info("tc action pedit bad command (%d)\n",
-				cmd);
+			pr_info_ratelimited("tc action pedit bad command (%d)\n", cmd);
 			goto bad;
 		}
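Besides moving the htype/cmd range checks into the netlink policy, the pedit change above rejects misaligned static offsets at configure time: a key with no offmask has a fixed offset, so it can be validated once, while keys using the 'at' option remain checked per packet. An illustrative standalone mirror of the new check:

#include <stdio.h>

static int pedit_key_off_ok(unsigned int off, unsigned int offmask)
{
	/* no offmask: offset is static, reject misalignment up front */
	if (!offmask && off % 4)
		return -22;	/* -EINVAL: offsets must be on 32bit boundaries */
	return 0;		/* dynamic offset: checked in the datapath */
}

int main(void)
{
	printf("%d\n", pedit_key_off_ok(6, 0));		/* -22 */
	printf("%d\n", pedit_key_off_ok(6, 0xf0));	/* 0 */
	return 0;
}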
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 2d12d2626415..0c8aa7e686ea 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -420,6 +420,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
 			flags &= ~TUNNEL_CSUM;
 
+		if (nla_get_flag(tb[TCA_TUNNEL_KEY_NO_FRAG]))
+			flags |= TUNNEL_DONT_FRAGMENT;
+
 		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
 			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);
 
@@ -747,6 +750,8 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
 				       key->tp_dst)) ||
 		    nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
 			       !(key->tun_flags & TUNNEL_CSUM)) ||
+		    ((key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+		     nla_put_flag(skb, TCA_TUNNEL_KEY_NO_FRAG)) ||
 		    tunnel_key_opts_dump(skb, info))
 			goto nla_put_failure;
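TCA_TUNNEL_KEY_NO_FRAG is a zero-length flag attribute, so nla_get_flag() in the hunk above reduces to "was the attribute present", and the dump side emits it only when TUNNEL_DONT_FRAGMENT is set. A simplified standalone mirror of the flag handling (flag values are illustrative, not the uapi ones):

#include <stdbool.h>
#include <stdio.h>

#define TUNNEL_CSUM		0x1	/* illustrative values */
#define TUNNEL_DONT_FRAGMENT	0x2

static unsigned int tunnel_key_flags(bool no_csum_attr, bool no_frag_attr)
{
	unsigned int flags = TUNNEL_CSUM;	/* csum on by default, opt-out */

	if (no_csum_attr)
		flags &= ~TUNNEL_CSUM;
	if (no_frag_attr)			/* nla_get_flag(tb[...NO_FRAG]) */
		flags |= TUNNEL_DONT_FRAGMENT;
	return flags;
}

int main(void)
{
	printf("0x%x\n", tunnel_key_flags(false, true));	/* 0x3 */
	return 0;
}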
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 35785a36c802..3c3629c9e7b6 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3211,6 +3211,7 @@ int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
 #ifdef CONFIG_NET_CLS_ACT
 	exts->type = 0;
 	exts->nr_actions = 0;
+	exts->miss_cookie_node = NULL;
 	/* Note: we do not own yet a reference on net.
 	 * This reference might be taken later from tcf_exts_get_net().
 	 */
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 475fe222a855..cc49256d5318 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1057,7 +1057,7 @@ static void fl_set_key_pppoe(struct nlattr **tb,
 	 * because ETH_P_PPP_SES was stored in basic.n_proto
 	 * which might get overwritten by ppp_proto
 	 * or might be set to 0, the role of key_val::type
-	 * is simmilar to vlan_key::tpid
+	 * is similar to vlan_key::tpid
 	 */
 	key_val->type = htons(ETH_P_PPP_SES);
 	key_mask->type = cpu_to_be16(~0);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 49bae3d5006b..af85a73c4c54 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -44,7 +44,7 @@
  *	be provided for non-numeric types.
  *
  *	Additionally, type dependent modifiers such as shift operators
- *	or mask may be applied to extend the functionaliy. As of now,
+ *	or mask may be applied to extend the functionality. As of now,
  *	the variable length type supports shifting the byte string to
  *	the right, eating up any number of octets and thus supporting
  *	wildcard interface name comparisons such as "ppp%" matching
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index aba789c30a2e..fdb8f429333d 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -639,14 +639,16 @@ void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
 		return;
 
 	if (hrtimer_is_queued(&wd->timer)) {
+		u64 softexpires;
+
+		softexpires = ktime_to_ns(hrtimer_get_softexpires(&wd->timer));
 		/* If timer is already set in [expires, expires + delta_ns],
 		 * do not reprogram it.
 		 */
-		if (wd->last_expires - expires <= delta_ns)
+		if (softexpires - expires <= delta_ns)
 			return;
 	}
 
-	wd->last_expires = expires;
 	hrtimer_start_range_ns(&wd->timer,
			       ns_to_ktime(expires),
			       delta_ns,
			       HRTIMER_MODE_ABS_PINNED);
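The sch_api.c hunk above rereads the hrtimer's soft expiry instead of caching it in wd->last_expires, keeping the unsigned-window test. With unsigned arithmetic, "soft - expires <= delta" holds exactly when the queued expiry lands in [expires, expires + delta], even across wraparound, so an earlier deadline always forces a reprogram. A runnable mirror of the test:

#include <stdint.h>
#include <stdio.h>

static int within_window(uint64_t soft, uint64_t expires, uint64_t delta)
{
	/* underflows to a huge value when soft < expires, so returns 0 */
	return soft - expires <= delta;
}

int main(void)
{
	printf("%d\n", within_window(1500, 1000, 600));	/* 1: keep timer */
	printf("%d\n", within_window(900, 1000, 600));	/* 0: reprogram */
	return 0;
}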
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 7970217b565a..891e007d5c0b 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1360,7 +1360,7 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
 		return cake_calc_overhead(q, len, off);
 
 	/* borrowed from qdisc_pkt_len_init() */
-	hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+	hdr_len = skb_transport_offset(skb);
 
 	/* + transport layer */
 	if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 |
@@ -1368,14 +1368,14 @@ static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
 		const struct tcphdr *th;
 		struct tcphdr _tcphdr;
 
-		th = skb_header_pointer(skb, skb_transport_offset(skb),
+		th = skb_header_pointer(skb, hdr_len,
					sizeof(_tcphdr), &_tcphdr);
 		if (likely(th))
 			hdr_len += __tcp_hdrlen(th);
 	} else {
 		struct udphdr _udphdr;
 
-		if (skb_header_pointer(skb, skb_transport_offset(skb),
+		if (skb_header_pointer(skb, hdr_len,
				       sizeof(_udphdr), &_udphdr))
 			hdr_len += sizeof(struct udphdr);
 	}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 48d14fb90ba0..f59a2cb2c803 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -779,13 +779,17 @@ static int fq_resize(struct Qdisc *sch, u32 log)
 	return 0;
 }
 
+static struct netlink_range_validation iq_range = {
+	.max = INT_MAX,
+};
+
 static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
 	[TCA_FQ_UNSPEC]			= { .strict_start_type = TCA_FQ_TIMER_SLACK },
 
 	[TCA_FQ_PLIMIT]			= { .type = NLA_U32 },
 	[TCA_FQ_FLOW_PLIMIT]		= { .type = NLA_U32 },
 	[TCA_FQ_QUANTUM]		= { .type = NLA_U32 },
-	[TCA_FQ_INITIAL_QUANTUM]	= { .type = NLA_U32 },
+	[TCA_FQ_INITIAL_QUANTUM]	= NLA_POLICY_FULL_RANGE(NLA_U32, &iq_range),
 	[TCA_FQ_RATE_ENABLE]		= { .type = NLA_U32 },
 	[TCA_FQ_FLOW_DEFAULT_RATE]	= { .type = NLA_U32 },
 	[TCA_FQ_FLOW_MAX_RATE]		= { .type = NLA_U32 },
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a9aadc4e6858..37e41f972f69 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -502,7 +502,7 @@ static void dev_watchdog(struct timer_list *t)
 	if (netif_device_present(dev) &&
 	    netif_running(dev) &&
 	    netif_carrier_ok(dev)) {
-		int some_queue_timedout = 0;
+		unsigned int timedout_ms = 0;
 		unsigned int i;
 		unsigned long trans_start;
 
@@ -514,16 +514,16 @@ static void dev_watchdog(struct timer_list *t)
 			if (netif_xmit_stopped(txq) &&
 			    time_after(jiffies, (trans_start +
						 dev->watchdog_timeo))) {
-				some_queue_timedout = 1;
+				timedout_ms = jiffies_to_msecs(jiffies - trans_start);
 				atomic_long_inc(&txq->trans_timeout);
 				break;
 			}
 		}
 
-		if (unlikely(some_queue_timedout)) {
+		if (unlikely(timedout_ms)) {
 			trace_net_dev_xmit_timeout(dev, i);
-			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
-				  dev->name, netdev_drivername(dev), i);
+			WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n",
+				  dev->name, netdev_drivername(dev), i, timedout_ms);
 			netif_freeze_queues(dev);
 			dev->netdev_ops->ndo_tx_timeout(dev, i);
 			netif_unfreeze_queues(dev);
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 92f2975b6a82..8aef7dd9fb88 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1786,7 +1786,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		goto failure;
 
 	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
-					  NULL);
+					  extack);
 	if (err < 0)
 		goto failure;
 
@@ -1858,7 +1858,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* check maximal depth */
 		if (parent && parent->parent && parent->parent->level < 2) {
-			pr_err("htb: tree is too deep\n");
+			NL_SET_ERR_MSG_MOD(extack, "tree is too deep");
 			goto failure;
 		}
 		err = -ENOBUFS;
@@ -1917,8 +1917,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			};
 			err = htb_offload(dev, &offload_opt);
 			if (err) {
-				pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
-				       err);
+				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
 				goto err_kill_estimator;
 			}
 			dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
@@ -1937,8 +1937,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			};
 			err = htb_offload(dev, &offload_opt);
 			if (err) {
-				pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
-				       err);
+				NL_SET_ERR_MSG_WEAK(extack,
						    "Failed to offload TC_HTB_LEAF_TO_INNER");
 				htb_graft_helper(dev_queue, old_q);
 				goto err_kill_estimator;
 			}
@@ -2067,8 +2067,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	qdisc_put(parent_qdisc);
 
 	if (warn)
-		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
-			cl->common.classid, (warn == -1 ? "small" : "big"));
+		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "quantum of class %X is %s. Consider r2q change.",
				       cl->common.classid, (warn == -1 ? "small" : "big"));
 
 	qdisc_class_hash_grow(sch, &q->clhash);
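The htb hunks above convert kernel-log errors into extack messages. NL_SET_ERR_MSG_WEAK() only installs its message when no message has been set yet, so a more specific error reported by the offloading driver wins over the generic fallback. A simplified stand-in showing that semantic (not the kernel's struct netlink_ext_ack):

#include <stdio.h>

struct ext_ack {
	const char *msg;
};

static void set_err_msg_weak(struct ext_ack *ack, const char *msg)
{
	if (!ack->msg)		/* keep an earlier, more specific message */
		ack->msg = msg;
}

int main(void)
{
	struct ext_ack ack = { .msg = NULL };

	set_err_msg_weak(&ack, "Failed to offload TC_HTB_LEAF_ALLOC_QUEUE");
	set_err_msg_weak(&ack, "generic fallback (ignored)");
	printf("%s\n", ack.msg);
	return 0;
}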
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 48ed87b91086..dc5a0ff50b14 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
  */
 
+#include <linux/ethtool_netlink.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
@@ -27,15 +28,19 @@ struct mqprio_sched {
 	u32 flags;
 	u64 min_rate[TC_QOPT_MAX_QUEUE];
 	u64 max_rate[TC_QOPT_MAX_QUEUE];
+	u32 fp[TC_QOPT_MAX_QUEUE];
 };
 
 static int mqprio_enable_offload(struct Qdisc *sch,
				 const struct tc_mqprio_qopt *qopt,
				 struct netlink_ext_ack *extack)
 {
-	struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};
 	struct mqprio_sched *priv = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
+	struct tc_mqprio_qopt_offload mqprio = {
+		.qopt = *qopt,
+		.extack = extack,
+	};
 	int err, i;
 
 	switch (priv->mode) {
@@ -60,6 +65,8 @@ static int mqprio_enable_offload(struct Qdisc *sch,
 			return -EINVAL;
 	}
 
+	mqprio_fp_to_offload(priv->fp, &mqprio);
+
 	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQPRIO,
					    &mqprio);
 	if (err)
@@ -133,91 +140,193 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
 	/* If ndo_setup_tc is not present then hardware doesn't support offload
 	 * and we should return an error.
 	 */
-	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
+	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc) {
+		NL_SET_ERR_MSG(extack,
			       "Device does not support hardware offload");
 		return -EINVAL;
+	}
 
 	return 0;
 }
 
+static const struct
+nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
+	[TCA_MQPRIO_TC_ENTRY_INDEX]	= NLA_POLICY_MAX(NLA_U32,
							 TC_QOPT_MAX_QUEUE),
+	[TCA_MQPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
+};
+
 static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
 	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
 	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
 	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
 	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
+	[TCA_MQPRIO_TC_ENTRY]	= { .type = NLA_NESTED },
 };
 
-static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
-		      const struct nla_policy *policy, int len)
+static int mqprio_parse_tc_entry(u32 fp[TC_QOPT_MAX_QUEUE],
+				 struct nlattr *opt,
+				 unsigned long *seen_tcs,
+				 struct netlink_ext_ack *extack)
 {
-	int nested_len = nla_len(nla) - NLA_ALIGN(len);
+	struct nlattr *tb[TCA_MQPRIO_TC_ENTRY_MAX + 1];
+	int err, tc;
+
+	err = nla_parse_nested(tb, TCA_MQPRIO_TC_ENTRY_MAX, opt,
			       mqprio_tc_entry_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (NL_REQ_ATTR_CHECK(extack, opt, tb, TCA_MQPRIO_TC_ENTRY_INDEX)) {
+		NL_SET_ERR_MSG(extack, "TC entry index missing");
+		return -EINVAL;
+	}
+
+	tc = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_INDEX]);
+	if (*seen_tcs & BIT(tc)) {
+		NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_TC_ENTRY_INDEX],
				    "Duplicate tc entry");
+		return -EINVAL;
+	}
+
+	*seen_tcs |= BIT(tc);
 
-	if (nested_len >= nla_attr_size(0))
-		return nla_parse_deprecated(tb, maxtype,
-					    nla_data(nla) + NLA_ALIGN(len),
-					    nested_len, policy, NULL);
+	if (tb[TCA_MQPRIO_TC_ENTRY_FP])
+		fp[tc] = nla_get_u32(tb[TCA_MQPRIO_TC_ENTRY_FP]);
 
-	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
 	return 0;
 }
 
+static int mqprio_parse_tc_entries(struct Qdisc *sch, struct nlattr *nlattr_opt,
				   int nlattr_opt_len,
				   struct netlink_ext_ack *extack)
+{
+	struct mqprio_sched *priv = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	bool have_preemption = false;
+	unsigned long seen_tcs = 0;
+	u32 fp[TC_QOPT_MAX_QUEUE];
+	struct nlattr *n;
+	int tc, rem;
+	int err = 0;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		fp[tc] = priv->fp[tc];
+
+	nla_for_each_attr(n, nlattr_opt, nlattr_opt_len, rem) {
+		if (nla_type(n) != TCA_MQPRIO_TC_ENTRY)
+			continue;
+
+		err = mqprio_parse_tc_entry(fp, n, &seen_tcs, extack);
+		if (err)
+			goto out;
+	}
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		priv->fp[tc] = fp[tc];
+		if (fp[tc] == TC_FP_PREEMPTIBLE)
+			have_preemption = true;
+	}
+
+	if (have_preemption && !ethtool_dev_mm_supported(dev)) {
+		NL_SET_ERR_MSG(extack, "Device does not support preemption");
+		return -EOPNOTSUPP;
+	}
+out:
+	return err;
+}
+
+/* Parse the other netlink attributes that represent the payload of
+ * TCA_OPTIONS, which are appended right after struct tc_mqprio_qopt.
+ */
 static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt,
-			       struct nlattr *opt)
+			       struct nlattr *opt,
+			       struct netlink_ext_ack *extack)
 {
+	struct nlattr *nlattr_opt = nla_data(opt) + NLA_ALIGN(sizeof(*qopt));
+	int nlattr_opt_len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
 	struct mqprio_sched *priv = qdisc_priv(sch);
-	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
+	struct nlattr *tb[TCA_MQPRIO_MAX + 1] = {};
 	struct nlattr *attr;
 	int i, rem, err;
 
-	err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
-			 sizeof(*qopt));
-	if (err < 0)
-		return err;
+	if (nlattr_opt_len >= nla_attr_size(0)) {
+		err = nla_parse_deprecated(tb, TCA_MQPRIO_MAX, nlattr_opt,
					   nlattr_opt_len, mqprio_policy,
					   NULL);
+		if (err < 0)
+			return err;
+	}
 
-	if (!qopt->hw)
+	if (!qopt->hw) {
+		NL_SET_ERR_MSG(extack,
			       "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode");
 		return -EINVAL;
+	}
 
 	if (tb[TCA_MQPRIO_MODE]) {
 		priv->flags |= TC_MQPRIO_F_MODE;
-		priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
+		priv->mode = nla_get_u16(tb[TCA_MQPRIO_MODE]);
 	}
 
 	if (tb[TCA_MQPRIO_SHAPER]) {
 		priv->flags |= TC_MQPRIO_F_SHAPER;
-		priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
+		priv->shaper = nla_get_u16(tb[TCA_MQPRIO_SHAPER]);
 	}
 
 	if (tb[TCA_MQPRIO_MIN_RATE64]) {
-		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64],
					    "min_rate accepted only when shaper is in bw_rlimit mode");
 			return -EINVAL;
+		}
 		i = 0;
 		nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64], rem) {
-			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
+			if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) {
+				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MIN_RATE64");
 				return -EINVAL;
+			}
 			if (i >= qopt->num_tc)
 				break;
-			priv->min_rate[i] = *(u64 *)nla_data(attr);
+			priv->min_rate[i] = nla_get_u64(attr);
 			i++;
 		}
 		priv->flags |= TC_MQPRIO_F_MIN_RATE;
 	}
 
 	if (tb[TCA_MQPRIO_MAX_RATE64]) {
-		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
+		if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+			NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64],
					    "max_rate accepted only when shaper is in bw_rlimit mode");
 			return -EINVAL;
+		}
 		i = 0;
 		nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64], rem) {
-			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
+			if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) {
+				NL_SET_ERR_MSG_ATTR(extack, attr,
						    "Attribute type expected to be TCA_MQPRIO_MAX_RATE64");
 				return -EINVAL;
+			}
 			if (i >= qopt->num_tc)
 				break;
-			priv->max_rate[i] = *(u64 *)nla_data(attr);
+			priv->max_rate[i] = nla_get_u64(attr);
 			i++;
 		}
 		priv->flags |= TC_MQPRIO_F_MAX_RATE;
 	}
 
+	if (tb[TCA_MQPRIO_TC_ENTRY]) {
+		err = mqprio_parse_tc_entries(sch, nlattr_opt, nlattr_opt_len,
					      extack);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
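mqprio_parse_tc_entry() above tracks which traffic classes have already appeared using a bitmap, so each TCA_MQPRIO_TC_ENTRY nest can name a given tc index at most once (TC_QOPT_MAX_QUEUE is 16, so one unsigned long suffices). A runnable mirror of that duplicate check:

#include <stdio.h>

static int note_tc_entry(unsigned long *seen_tcs, int tc)
{
	if (*seen_tcs & (1UL << tc))
		return -1;		/* duplicate tc entry */
	*seen_tcs |= 1UL << tc;
	return 0;
}

int main(void)
{
	unsigned long seen = 0;

	printf("%d\n", note_tc_entry(&seen, 3));	/* 0 */
	printf("%d\n", note_tc_entry(&seen, 3));	/* -1 */
	return 0;
}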
@@ -231,7 +340,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
 	int i, err = -EOPNOTSUPP;
 	struct tc_mqprio_qopt *qopt = NULL;
 	struct tc_mqprio_caps caps;
-	int len;
+	int len, tc;
 
 	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
 	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);
@@ -249,6 +358,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
 	if (!opt || nla_len(opt) < sizeof(*qopt))
 		return -EINVAL;
 
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		priv->fp[tc] = TC_FP_EXPRESS;
+
 	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_MQPRIO,
				 &caps, sizeof(caps));
 
@@ -258,7 +370,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
 	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
 	if (len > 0) {
-		err = mqprio_parse_nlattr(sch, qopt, opt);
+		err = mqprio_parse_nlattr(sch, qopt, opt, extack);
 		if (err)
 			return err;
 	}
@@ -399,6 +511,33 @@ nla_put_failure:
 	return -1;
 }
 
+static int mqprio_dump_tc_entries(struct mqprio_sched *priv,
				  struct sk_buff *skb)
+{
+	struct nlattr *n;
+	int tc;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+		n = nla_nest_start(skb, TCA_MQPRIO_TC_ENTRY);
+		if (!n)
+			return -EMSGSIZE;
+
+		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_INDEX, tc))
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_MQPRIO_TC_ENTRY_FP, priv->fp[tc]))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, n);
+	}
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(skb, n);
+	return -EMSGSIZE;
+}
+
 static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct net_device *dev = qdisc_dev(sch);
@@ -449,6 +588,9 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    (dump_rates(priv, &opt, skb) != 0))
 		goto nla_put_failure;
 
+	if (mqprio_dump_tc_entries(priv, skb))
+		goto nla_put_failure;
+
 	return nla_nest_end(skb, nla);
 nla_put_failure:
 	nlmsg_trim(skb, nla);
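mqprio_parse_nlattr() above deals with mqprio's unusual TCA_OPTIONS layout: a legacy struct tc_mqprio_qopt comes first, followed, after 4-byte alignment, by ordinary netlink attributes. A small sketch of the offset arithmetic; the sizes used are illustrative, not the real structure size:

#include <stdio.h>

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
	unsigned int qopt_size = 86;	/* stand-in for sizeof(struct tc_mqprio_qopt) */
	unsigned int opt_len = 120;	/* stand-in for nla_len(opt) */

	printf("attrs start at offset %u, %u bytes of attributes\n",
	       NLA_ALIGN(qopt_size), opt_len - NLA_ALIGN(qopt_size));
	return 0;
}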
diff --git a/net/sched/sch_mqprio_lib.c b/net/sched/sch_mqprio_lib.c
index c58a533b8ec5..83b3793c4012 100644
--- a/net/sched/sch_mqprio_lib.c
+++ b/net/sched/sch_mqprio_lib.c
@@ -114,4 +114,18 @@ void mqprio_qopt_reconstruct(struct net_device *dev, struct tc_mqprio_qopt *qopt
 }
 EXPORT_SYMBOL_GPL(mqprio_qopt_reconstruct);
 
+void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
			  struct tc_mqprio_qopt_offload *mqprio)
+{
+	unsigned long preemptible_tcs = 0;
+	int tc;
+
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		if (fp[tc] == TC_FP_PREEMPTIBLE)
+			preemptible_tcs |= BIT(tc);
+
+	mqprio->preemptible_tcs = preemptible_tcs;
+}
+EXPORT_SYMBOL_GPL(mqprio_fp_to_offload);
+
 MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_mqprio_lib.h b/net/sched/sch_mqprio_lib.h
index 63f725ab8761..079f597072e3 100644
--- a/net/sched/sch_mqprio_lib.h
+++ b/net/sched/sch_mqprio_lib.h
@@ -14,5 +14,7 @@ int mqprio_validate_qopt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
			 struct netlink_ext_ack *extack);
 void mqprio_qopt_reconstruct(struct net_device *dev,
			     struct tc_mqprio_qopt *qopt);
+void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
			  struct tc_mqprio_qopt_offload *mqprio);
 
 #endif
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 265c238047a4..2152a56d73f8 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -319,7 +319,7 @@ void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
 	}
 
 	/* If qdelay is zero and backlog is not, it means backlog is very small,
-	 * so we do not update probabilty in this round.
+	 * so we do not update probability in this round.
 	 */
 	if (qdelay == 0 && backlog != 0)
 		update_prob = false;
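mqprio_fp_to_offload() above condenses the per-tc express/preemptible array into a single bitmask for drivers. A runnable mirror (the TC_FP_* values below are illustrative; see the uapi pkt_sched.h for the real ones):

#include <stdio.h>

#define TC_QOPT_MAX_QUEUE	16
#define TC_FP_EXPRESS		1	/* illustrative */
#define TC_FP_PREEMPTIBLE	2	/* illustrative */

static unsigned long fp_to_preemptible_tcs(const unsigned int fp[TC_QOPT_MAX_QUEUE])
{
	unsigned long preemptible_tcs = 0;
	int tc;

	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
		if (fp[tc] == TC_FP_PREEMPTIBLE)
			preemptible_tcs |= 1UL << tc;
	return preemptible_tcs;
}

int main(void)
{
	unsigned int fp[TC_QOPT_MAX_QUEUE] = {
		[0] = TC_FP_EXPRESS, [1] = TC_FP_PREEMPTIBLE,
		[2] = TC_FP_PREEMPTIBLE,
	};

	printf("0x%lx\n", fp_to_preemptible_tcs(fp));	/* 0x6 */
	return 0;
}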
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 02098a02943e..dfd9a99e6257 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -113,6 +113,7 @@
 
 #define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 #define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
+#define QFQ_MAX_LMAX		(1UL << QFQ_MTU_SHIFT)
 
 #define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
 
@@ -214,9 +215,14 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct qfq_class, common);
 }
 
+static struct netlink_range_validation lmax_range = {
+	.min = QFQ_MIN_LMAX,
+	.max = QFQ_MAX_LMAX,
+};
+
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
-	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
-	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
+	[TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT),
+	[TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range),
 };
 
 /*
@@ -402,23 +408,19 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	int err;
 	int delta_w;
 
-	if (tca[TCA_OPTIONS] == NULL) {
-		pr_notice("qfq: no options\n");
+	if (NL_REQ_ATTR_CHECK(extack, NULL, tca, TCA_OPTIONS)) {
+		NL_SET_ERR_MSG_MOD(extack, "missing options");
 		return -EINVAL;
 	}
 
 	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
-					  qfq_policy, NULL);
+					  qfq_policy, extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_QFQ_WEIGHT]) {
+	if (tb[TCA_QFQ_WEIGHT])
 		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
-		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
-			pr_notice("qfq: invalid weight %u\n", weight);
-			return -EINVAL;
-		}
-	} else
+	else
 		weight = 1;
 
 	if (tb[TCA_QFQ_LMAX])
@@ -426,11 +428,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	else
 		lmax = psched_mtu(qdisc_dev(sch));
 
-	if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
-		pr_notice("qfq: invalid max length %u\n", lmax);
-		return -EINVAL;
-	}
-
 	inv_w = ONE_FP / weight;
 	weight = ONE_FP / inv_w;
 
@@ -442,8 +439,9 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	delta_w = weight - (cl ? cl->agg->class_weight : 0);
 
 	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
-		pr_notice("qfq: total weight out of range (%d + %u)\n",
-			  delta_w, q->wsum);
+		NL_SET_ERR_MSG_FMT_MOD(extack,
				       "total weight out of range (%d + %u)\n",
				       delta_w, q->wsum);
 		return -EINVAL;
 	}
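The qfq hunks above move the weight and lmax bounds from hand-rolled checks in qfq_change_class() into the netlink policy, so the netlink core rejects out-of-range values (and points extack at the offending attribute) before the qdisc code runs. The lmax range written out by hand, as a runnable check:

#include <stdio.h>

#define QFQ_MTU_SHIFT	16
#define QFQ_MIN_LMAX	512
#define QFQ_MAX_LMAX	(1UL << QFQ_MTU_SHIFT)

static int lmax_in_range(unsigned long lmax)
{
	return lmax >= QFQ_MIN_LMAX && lmax <= QFQ_MAX_LMAX;
}

int main(void)
{
	printf("%d %d %d\n", lmax_in_range(256), lmax_in_range(1514),
	       lmax_in_range(1UL << 20));	/* 0 1 0 */
	return 0;
}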
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 1f469861eae3..76db9a10ef50 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
@@ -96,6 +97,7 @@ struct taprio_sched {
 	struct list_head taprio_list;
 	int cur_txq[TC_MAX_QUEUE];
 	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
+	u32 fp[TC_QOPT_MAX_QUEUE]; /* only for dump and offloading */
 	u32 txtime_delay;
 };
 
@@ -1002,6 +1004,9 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = {
 static const struct nla_policy taprio_tc_policy[TCA_TAPRIO_TC_ENTRY_MAX + 1] = {
 	[TCA_TAPRIO_TC_ENTRY_INDEX]	= { .type = NLA_U32 },
 	[TCA_TAPRIO_TC_ENTRY_MAX_SDU]	= { .type = NLA_U32 },
+	[TCA_TAPRIO_TC_ENTRY_FP]	= NLA_POLICY_RANGE(NLA_U32,
							   TC_FP_EXPRESS,
							   TC_FP_PREEMPTIBLE),
 };
 
 static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
@@ -1520,22 +1525,31 @@ static int taprio_enable_offload(struct net_device *dev,
 		return -ENOMEM;
 	}
 	offload->enable = 1;
+	offload->extack = extack;
 	mqprio_qopt_reconstruct(dev, &offload->mqprio.qopt);
+	offload->mqprio.extack = extack;
 	taprio_sched_to_offload(dev, sched, offload, &caps);
+	mqprio_fp_to_offload(q->fp, &offload->mqprio);
 
 	for (tc = 0; tc < TC_MAX_QUEUE; tc++)
 		offload->max_sdu[tc] = q->max_sdu[tc];
 
 	err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
 	if (err < 0) {
-		NL_SET_ERR_MSG(extack,
			       "Device failed to setup taprio offload");
+		NL_SET_ERR_MSG_WEAK(extack,
				    "Device failed to setup taprio offload");
 		goto done;
 	}
 
 	q->offloaded = true;
 
 done:
+	/* The offload structure may linger around via a reference taken by the
+	 * device driver, so clear up the netlink extack pointer so that the
+	 * driver isn't tempted to dereference data which stopped being valid
+	 */
+	offload->extack = NULL;
+	offload->mqprio.extack = NULL;
 	taprio_offload_free(offload);
 
 	return err;
@@ -1663,13 +1677,14 @@ out:
 static int taprio_parse_tc_entry(struct Qdisc *sch,
				 struct nlattr *opt,
				 u32 max_sdu[TC_QOPT_MAX_QUEUE],
+				 u32 fp[TC_QOPT_MAX_QUEUE],
				 unsigned long *seen_tcs,
				 struct netlink_ext_ack *extack)
 {
 	struct nlattr *tb[TCA_TAPRIO_TC_ENTRY_MAX + 1] = { };
 	struct net_device *dev = qdisc_dev(sch);
-	u32 val = 0;
 	int err, tc;
+	u32 val;
 
 	err = nla_parse_nested(tb, TCA_TAPRIO_TC_ENTRY_MAX, opt,
			       taprio_tc_policy, extack);
@@ -1694,15 +1709,18 @@ static int taprio_parse_tc_entry(struct Qdisc *sch,
 
 	*seen_tcs |= BIT(tc);
 
-	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU])
+	if (tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]) {
 		val = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_MAX_SDU]);
+		if (val > dev->max_mtu) {
+			NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
+			return -ERANGE;
+		}
 
-	if (val > dev->max_mtu) {
-		NL_SET_ERR_MSG_MOD(extack, "TC max SDU exceeds device max MTU");
-		return -ERANGE;
+		max_sdu[tc] = val;
 	}
 
-	max_sdu[tc] = val;
+	if (tb[TCA_TAPRIO_TC_ENTRY_FP])
+		fp[tc] = nla_get_u32(tb[TCA_TAPRIO_TC_ENTRY_FP]);
 
 	return 0;
 }
@@ -1712,29 +1730,51 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
				   struct netlink_ext_ack *extack)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
 	u32 max_sdu[TC_QOPT_MAX_QUEUE];
+	bool have_preemption = false;
 	unsigned long seen_tcs = 0;
+	u32 fp[TC_QOPT_MAX_QUEUE];
 	struct nlattr *n;
 	int tc, rem;
 	int err = 0;
 
-	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
 		max_sdu[tc] = q->max_sdu[tc];
+		fp[tc] = q->fp[tc];
+	}
 
 	nla_for_each_nested(n, opt, rem) {
 		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
 			continue;
 
-		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs,
+		err = taprio_parse_tc_entry(sch, n, max_sdu, fp, &seen_tcs,
					    extack);
 		if (err)
-			goto out;
+			return err;
 	}
 
-	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
 		q->max_sdu[tc] = max_sdu[tc];
+		q->fp[tc] = fp[tc];
+		if (fp[tc] != TC_FP_EXPRESS)
+			have_preemption = true;
+	}
+
+	if (have_preemption) {
+		if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) {
+			NL_SET_ERR_MSG(extack,
				       "Preemption only supported with full offload");
+			return -EOPNOTSUPP;
+		}
+
+		if (!ethtool_dev_mm_supported(dev)) {
+			NL_SET_ERR_MSG(extack,
				       "Device does not support preemption");
+			return -EOPNOTSUPP;
+		}
+	}
 
-out:
 	return err;
 }
 
@@ -2015,7 +2055,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 {
 	struct taprio_sched *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	int i;
+	int i, tc;
 
 	spin_lock_init(&q->current_entry_lock);
 
@@ -2072,6 +2112,9 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 		q->qdiscs[i] = qdisc;
 	}
 
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
+		q->fp[tc] = TC_FP_EXPRESS;
+
 	taprio_detect_broken_mqprio(q);
 
 	return taprio_change(sch, opt, extack);
@@ -2215,6 +2258,7 @@ error_nest:
 }
 
 static int taprio_dump_tc_entries(struct sk_buff *skb,
+				  struct taprio_sched *q,
				  struct sched_gate_list *sched)
 {
 	struct nlattr *n;
@@ -2232,6 +2276,9 @@ static int taprio_dump_tc_entries(struct sk_buff *skb,
				sched->max_sdu[tc]))
 			goto nla_put_failure;
 
+		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_FP, q->fp[tc]))
+			goto nla_put_failure;
+
 		nla_nest_end(skb, n);
 	}
 
@@ -2273,7 +2320,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
 		goto options_error;
 
-	if (oper && taprio_dump_tc_entries(skb, oper))
+	if (oper && taprio_dump_tc_entries(skb, q, oper))
 		goto options_error;
 
 	if (oper && dump_schedule(skb, oper))
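taprio_parse_tc_entries() above gates frame preemption twice: a preemptible traffic class is accepted only when the schedule is fully offloaded and ethtool reports MAC merge layer support. A simplified runnable mirror of that gating (booleans stand in for the real flag and capability queries):

#include <stdbool.h>
#include <stdio.h>

static int check_preemption(bool have_preemption, bool full_offload,
			    bool dev_mm_supported)
{
	if (!have_preemption)
		return 0;
	if (!full_offload)
		return -95;	/* -EOPNOTSUPP on Linux */
	if (!dev_mm_supported)
		return -95;
	return 0;
}

int main(void)
{
	printf("%d\n", check_preemption(true, true, false));	/* -95 */
	printf("%d\n", check_preemption(true, true, true));	/* 0 */
	return 0;
}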