Diffstat (limited to 'drivers/net/ethernet/freescale/dpaa')
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c       | 184
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h       |  20
 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c |   2
 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h |   4
 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c   |  52
5 files changed, 154 insertions, 108 deletions
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dcbc598b11c6..23c23cca2620 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -229,7 +229,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
     net_dev->max_mtu = dpaa_get_max_mtu();
 
     net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
-                             NETIF_F_LLTX | NETIF_F_RXHASH);
+                             NETIF_F_RXHASH);
 
     net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
     /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
@@ -239,6 +239,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
     net_dev->features |= NETIF_F_RXCSUM;
 
     net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+    net_dev->lltx = true;
     /* we do not want shared skbs on TX */
     net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 
@@ -371,6 +372,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                          void *type_data)
 {
     struct dpaa_priv *priv = netdev_priv(net_dev);
+    int num_txqs_per_tc = dpaa_num_txqs_per_tc();
     struct tc_mqprio_qopt *mqprio = type_data;
     u8 num_tc;
     int i;
@@ -398,12 +400,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
     netdev_set_num_tc(net_dev, num_tc);
 
     for (i = 0; i < num_tc; i++)
-        netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
-                            i * DPAA_TC_TXQ_NUM);
+        netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+                            i * num_txqs_per_tc);
 
 out:
     priv->num_tc = num_tc ? : 1;
-    netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+    netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
     return 0;
 }
 
@@ -461,6 +463,22 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
     return 0;
 }
 
+static int dpaa_addr_sync(struct net_device *net_dev, const u8 *addr)
+{
+    const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+    return priv->mac_dev->add_hash_mac_addr(priv->mac_dev->fman_mac,
+                                            (enet_addr_t *)addr);
+}
+
+static int dpaa_addr_unsync(struct net_device *net_dev, const u8 *addr)
+{
+    const struct dpaa_priv *priv = netdev_priv(net_dev);
+
+    return priv->mac_dev->remove_hash_mac_addr(priv->mac_dev->fman_mac,
+                                               (enet_addr_t *)addr);
+}
+
 static void dpaa_set_rx_mode(struct net_device *net_dev)
 {
     const struct dpaa_priv *priv;
@@ -488,9 +506,9 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
                   err);
     }
 
-    err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
+    err = __dev_mc_sync(net_dev, dpaa_addr_sync, dpaa_addr_unsync);
     if (err < 0)
-        netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
+        netif_err(priv, drv, net_dev, "dpaa_addr_sync() = %d\n",
                   err);
 }
 
@@ -649,7 +667,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
         fq->wq = 6;
         break;
     case FQ_TYPE_TX:
-        switch (idx / DPAA_TC_TXQ_NUM) {
+        switch (idx / dpaa_num_txqs_per_tc()) {
         case 0:
             /* Low priority (best effort) */
             fq->wq = 6;
@@ -667,8 +685,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
             fq->wq = 0;
             break;
         default:
-            WARN(1, "Too many TX FQs: more than %d!\n",
-                 DPAA_ETH_TXQ_NUM);
+            WARN(1, "Too many TX FQs: more than %zu!\n",
+                 dpaa_max_num_txqs());
         }
         break;
     default:
@@ -740,7 +758,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 
     port_fqs->rx_pcdq = &dpaa_fq[0];
 
-    if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+    if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+                       FQ_TYPE_TX_CONF_MQ))
         goto fq_alloc_failed;
 
     dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +774,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
 
     port_fqs->tx_defq = &dpaa_fq[0];
 
-    if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+    if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
         goto fq_alloc_failed;
 
     return 0;
@@ -931,14 +950,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
     }
 }
 
-static void dpaa_fq_setup(struct dpaa_priv *priv,
-                          const struct dpaa_fq_cbs *fq_cbs,
-                          struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+                         const struct dpaa_fq_cbs *fq_cbs,
+                         struct fman_port *tx_port)
 {
     int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
     const cpumask_t *affine_cpus = qman_affine_cpus();
-    u16 channels[NR_CPUS];
     struct dpaa_fq *fq;
+    u16 *channels;
+
+    channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+    if (!channels)
+        return -ENOMEM;
 
     for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
         channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +988,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
         case FQ_TYPE_TX:
             dpaa_setup_egress(priv, fq, tx_port,
                               &fq_cbs->egress_ern);
-            /* If we have more Tx queues than the number of cores,
-             * just ignore the extra ones.
-             */
-            if (egress_cnt < DPAA_ETH_TXQ_NUM)
-                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+            priv->egress_fqs[egress_cnt++] = &fq->fq_base;
             break;
         case FQ_TYPE_TX_CONF_MQ:
             priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +1006,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
         }
     }
 
-    /* Make sure all CPUs receive a corresponding Tx queue. */
-    while (egress_cnt < DPAA_ETH_TXQ_NUM) {
-        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
-            if (fq->fq_type != FQ_TYPE_TX)
-                continue;
-            priv->egress_fqs[egress_cnt++] = &fq->fq_base;
-            if (egress_cnt == DPAA_ETH_TXQ_NUM)
-                break;
-        }
-    }
+    kfree(channels);
+
+    return 0;
 }
 
 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +1016,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
 {
     int i;
 
-    for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+    for (i = 0; i < dpaa_max_num_txqs(); i++)
         if (priv->egress_fqs[i] == tx_fq)
             return i;
 
@@ -1808,7 +1820,6 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
     struct page *page, *head_page;
     struct dpaa_bp *dpaa_bp;
     void *vaddr, *sg_vaddr;
-    int frag_off, frag_len;
     struct sk_buff *skb;
     dma_addr_t sg_addr;
     int page_offset;
@@ -1851,6 +1862,11 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
              * on Tx, if extra headers are added.
              */
             WARN_ON(fd_off != priv->rx_headroom);
+            /* The offset to data start within the buffer holding
+             * the SGT should always be equal to the offset to data
+             * start within the first buffer holding the frame.
+             */
+            WARN_ON_ONCE(fd_off != qm_sg_entry_get_off(&sgt[i]));
             skb_reserve(skb, fd_off);
             skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
         } else {
@@ -1864,21 +1880,23 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
             page = virt_to_page(sg_vaddr);
             head_page = virt_to_head_page(sg_vaddr);
 
-            /* Compute offset in (possibly tail) page */
+            /* Compute offset of sg_vaddr in (possibly tail) page */
             page_offset = ((unsigned long)sg_vaddr &
                            (PAGE_SIZE - 1)) +
                 (page_address(page) - page_address(head_page));
-            /* page_offset only refers to the beginning of sgt[i];
-             * but the buffer itself may have an internal offset.
+
+            /* Non-initial SGT entries should not have a buffer
+             * offset.
              */
-            frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
-            frag_len = qm_sg_entry_get_len(&sgt[i]);
+            WARN_ON_ONCE(qm_sg_entry_get_off(&sgt[i]));
+
             /* skb_add_rx_frag() does no checking on the page; if
              * we pass it a tail page, we'll end up with
-             * bad page accounting and eventually with segafults.
+             * bad page accounting and eventually with segfaults.
              */
-            skb_add_rx_frag(skb, i - 1, head_page, frag_off,
-                            frag_len, dpaa_bp->size);
+            skb_add_rx_frag(skb, i - 1, head_page, page_offset,
+                            qm_sg_entry_get_len(&sgt[i]),
+                            dpaa_bp->size);
         }
 
         /* Update the pool count for the current {cpu x bpool} */
@@ -2263,7 +2281,7 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
     new_xdpf->len = xdpf->len;
     new_xdpf->headroom = priv->tx_headroom;
     new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
-    new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
+    new_xdpf->mem_type = MEM_TYPE_PAGE_ORDER0;
 
     /* Release the initial buffer */
     xdp_return_frame_rx_napi(xdpf);
@@ -2277,12 +2295,12 @@ static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
     const int queue_mapping = skb_get_queue_mapping(skb);
-    bool nonlinear = skb_is_nonlinear(skb);
     struct rtnl_link_stats64 *percpu_stats;
     struct dpaa_percpu_priv *percpu_priv;
     struct netdev_queue *txq;
     struct dpaa_priv *priv;
     struct qm_fd fd;
+    bool nonlinear;
     int offset = 0;
     int err = 0;
 
@@ -2292,6 +2310,13 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 
     qm_fd_clear_fd(&fd);
 
+    /* Packet data is always read as 32-bit words, so zero out any part of
+     * the skb which might be sent if we have to pad the packet
+     */
+    if (__skb_put_padto(skb, ETH_ZLEN, false))
+        goto enomem;
+
+    nonlinear = skb_is_nonlinear(skb);
     if (!nonlinear) {
         /* We're going to store the skb backpointer at the beginning
          * of the data buffer, so we need a privately owned skb
@@ -2747,7 +2772,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
     if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
         !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
                                           &hash_offset)) {
-        hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+        hash = be32_to_cpu(*(__be32 *)(vaddr + hash_offset));
         hash_valid = true;
     }
 
@@ -2995,7 +3020,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
     if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
         return -EINVAL;
 
-    net_dev->mtu = new_mtu;
+    WRITE_ONCE(net_dev->mtu, new_mtu);
     return 0;
 }
 
@@ -3064,15 +3089,25 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
     return nxmit;
 }
 
-static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+static int dpaa_hwtstamp_get(struct net_device *dev,
+                             struct kernel_hwtstamp_config *config)
 {
     struct dpaa_priv *priv = netdev_priv(dev);
-    struct hwtstamp_config config;
 
-    if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
-        return -EFAULT;
+    config->tx_type = priv->tx_tstamp ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+    config->rx_filter = priv->rx_tstamp ? HWTSTAMP_FILTER_ALL :
+                        HWTSTAMP_FILTER_NONE;
+
+    return 0;
+}
 
-    switch (config.tx_type) {
+static int dpaa_hwtstamp_set(struct net_device *dev,
+                             struct kernel_hwtstamp_config *config,
+                             struct netlink_ext_ack *extack)
+{
+    struct dpaa_priv *priv = netdev_priv(dev);
+
+    switch (config->tx_type) {
     case HWTSTAMP_TX_OFF:
         /* Couldn't disable rx/tx timestamping separately.
          * Do nothing here.
@@ -3087,7 +3122,7 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
         return -ERANGE;
     }
 
-    if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
+    if (config->rx_filter == HWTSTAMP_FILTER_NONE) {
         /* Couldn't disable rx/tx timestamping separately.
          * Do nothing here.
          */
@@ -3096,28 +3131,17 @@ static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
         priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
         priv->rx_tstamp = true;
         /* TS is set for all frame types, not only those requested */
-        config.rx_filter = HWTSTAMP_FILTER_ALL;
+        config->rx_filter = HWTSTAMP_FILTER_ALL;
     }
 
-    return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
-            -EFAULT : 0;
+    return 0;
 }
 
 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
 {
-    int ret = -EINVAL;
     struct dpaa_priv *priv = netdev_priv(net_dev);
 
-    if (cmd == SIOCGMIIREG) {
-        if (net_dev->phydev)
-            return phylink_mii_ioctl(priv->mac_dev->phylink, rq,
-                                     cmd);
-    }
-
-    if (cmd == SIOCSHWTSTAMP)
-        return dpaa_ts_ioctl(net_dev, rq, cmd);
-
-    return ret;
+    return phylink_mii_ioctl(priv->mac_dev->phylink, rq, cmd);
 }
 
 static const struct net_device_ops dpaa_ops = {
@@ -3135,6 +3159,8 @@ static const struct net_device_ops dpaa_ops = {
     .ndo_change_mtu = dpaa_change_mtu,
     .ndo_bpf = dpaa_xdp,
     .ndo_xdp_xmit = dpaa_xdp_xmit,
+    .ndo_hwtstamp_get = dpaa_hwtstamp_get,
+    .ndo_hwtstamp_set = dpaa_hwtstamp_set,
 };
 
 static int dpaa_napi_add(struct net_device *net_dev)
@@ -3161,8 +3187,9 @@ static void dpaa_napi_del(struct net_device *net_dev)
     for_each_possible_cpu(cpu) {
         percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
 
-        netif_napi_del(&percpu_priv->np.napi);
+        __netif_napi_del(&percpu_priv->np.napi);
     }
+    synchronize_net();
 }
 
 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
@@ -3324,7 +3351,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
     /* Allocate this early, so we can store relevant information in
      * the private area
      */
-    net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+    net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
     if (!net_dev) {
         dev_err(dev, "alloc_etherdev_mq() failed\n");
         return -ENOMEM;
@@ -3339,6 +3366,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 
     priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
 
+    priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+                                    sizeof(*priv->egress_fqs),
+                                    GFP_KERNEL);
+    if (!priv->egress_fqs) {
+        err = -ENOMEM;
+        goto free_netdev;
+    }
+
+    priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+                                  sizeof(*priv->conf_fqs),
+                                  GFP_KERNEL);
+    if (!priv->conf_fqs) {
+        err = -ENOMEM;
+        goto free_netdev;
+    }
+
     mac_dev = dpaa_mac_dev_get(pdev);
     if (IS_ERR(mac_dev)) {
         netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3459,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
      */
     dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
-    dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+    err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+    if (err)
+        goto free_dpaa_bps;
 
     /* Create a congestion group for this netdev, with
      * dynamically-allocated CGR ID.
@@ -3462,7 +3507,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
     }
 
     priv->num_tc = 1;
-    netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+    netif_set_real_num_tx_queues(net_dev,
+                                 priv->num_tc * dpaa_num_txqs_per_tc());
 
     /* Initialize NAPI */
     err = dpaa_napi_add(net_dev);
@@ -3548,7 +3594,7 @@ static struct platform_driver dpaa_driver = {
     },
     .id_table = dpaa_devtype,
     .probe = dpaa_eth_probe,
-    .remove_new = dpaa_remove
+    .remove = dpaa_remove
 };
 
 static int __init dpaa_load(void)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index ac3c8ed57bbe..7ed659eb08de 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -18,10 +18,6 @@
 
 /* Number of prioritised traffic classes */
 #define DPAA_TC_NUM		4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM		NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
 /* More detailed FQ types - used for fine-grained WQ assignments */
 enum dpaa_fq_type {
@@ -142,8 +138,8 @@ struct dpaa_priv {
     struct mac_device *mac_dev;
     struct device *rx_dma_dev;
     struct device *tx_dma_dev;
-    struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
-    struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+    struct qman_fq **egress_fqs;
+    struct qman_fq **conf_fqs;
 
     u16 channel;
     struct list_head dpaa_fq_list;
@@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
 /* from dpaa_eth_sysfs.c */
 void dpaa_eth_sysfs_remove(struct device *dev);
 void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+    return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+    return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
 #endif	/* __DPAA_H */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..aad470e9caea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
     u32 last_fqid = 0;
     ssize_t bytes = 0;
     char *str;
-    int i = 0;
 
     list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
         switch (fq->fq_type) {
@@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
 
         prev = fq;
         prevstr = str;
-        i++;
     }
 
     if (prev) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 889f89df9930..9e1d44ae92cc 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -56,8 +56,8 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
         __entry->fd_format = qm_fd_get_format(fd);
         __entry->fd_offset = qm_fd_get_offset(fd);
         __entry->fd_length = qm_fd_get_length(fd);
-        __entry->fd_status = fd->status;
-        __assign_str(name, netdev->name);
+        __entry->fd_status = __be32_to_cpu(fd->status);
+        __assign_str(name);
     ),
 
     /* This is what gets printed when the trace event is triggered */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5bd0b36d1feb..9986f6e1f587 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -243,38 +243,24 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
                              u8 *data)
 {
-    unsigned int i, j, num_cpus, size;
-    char string_cpu[ETH_GSTRING_LEN];
-    u8 *strings;
+    unsigned int i, j, num_cpus;
 
-    memset(string_cpu, 0, sizeof(string_cpu));
-    strings = data;
-    num_cpus = num_online_cpus();
-    size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;
+    num_cpus = num_online_cpus();
 
     for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
-        for (j = 0; j < num_cpus; j++) {
-            snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
-                     dpaa_stats_percpu[i], j);
-            memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-            strings += ETH_GSTRING_LEN;
-        }
-        snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
-                 dpaa_stats_percpu[i]);
-        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-        strings += ETH_GSTRING_LEN;
-    }
-    for (j = 0; j < num_cpus; j++) {
-        snprintf(string_cpu, ETH_GSTRING_LEN,
-                 "bpool [CPU %d]", j);
-        memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-        strings += ETH_GSTRING_LEN;
+        for (j = 0; j < num_cpus; j++)
+            ethtool_sprintf(&data, "%s [CPU %d]",
+                            dpaa_stats_percpu[i], j);
+
+        ethtool_sprintf(&data, "%s [TOTAL]", dpaa_stats_percpu[i]);
     }
-    snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
-    memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-    strings += ETH_GSTRING_LEN;
+    for (i = 0; i < num_cpus; i++)
+        ethtool_sprintf(&data, "bpool [CPU %d]", i);
 
-    memcpy(strings, dpaa_stats_global, size);
+    ethtool_puts(&data, "bpool [TOTAL]");
+
+    for (i = 0; i < DPAA_STATS_GLOBAL_LEN; i++)
+        ethtool_puts(&data, dpaa_stats_global[i]);
 }
 
 static int dpaa_get_hash_opts(struct net_device *dev,
@@ -394,7 +380,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
 }
 
 static int dpaa_get_ts_info(struct net_device *net_dev,
-                            struct ethtool_ts_info *info)
+                            struct kernel_ethtool_ts_info *info)
 {
     struct device *dev = net_dev->dev.parent;
     struct device_node *mac_node = dev->of_node;
@@ -457,12 +443,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
                              struct netlink_ext_ack *extack)
 {
     const cpumask_t *cpus = qman_affine_cpus();
-    bool needs_revert[NR_CPUS] = {false};
     struct qman_portal *portal;
     u32 period, prev_period;
     u8 thresh, prev_thresh;
+    bool *needs_revert;
     int cpu, res;
 
+    needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+    if (!needs_revert)
+        return -ENOMEM;
+
     period = c->rx_coalesce_usecs;
     thresh = c->rx_max_coalesced_frames;
 
@@ -485,6 +475,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
         needs_revert[cpu] = true;
     }
 
+    kfree(needs_revert);
+
     return 0;
 
 revert_values:
@@ -498,6 +490,8 @@ revert_values:
         qman_dqrr_set_ithresh(portal, prev_thresh);
     }
 
+    kfree(needs_revert);
+
     return res;
 }
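
The recurring change in this diff — dropping the DPAA_TC_TXQ_NUM/DPAA_ETH_TXQ_NUM macros and the NR_CPUS-sized on-stack arrays in dpaa_fq_setup() and dpaa_set_coalesce() — follows a common kernel pattern: NR_CPUS is a compile-time maximum (potentially thousands), while num_possible_cpus() reflects the booted system, so scratch storage is sized at runtime and moved to the heap. A minimal sketch of the pattern; example_collect_channels() and get_channel() are hypothetical names:

#include <linux/cpumask.h>
#include <linux/slab.h>

static int example_collect_channels(u16 (*get_channel)(int cpu))
{
    int cpu, num_portals = 0;
    u16 *channels;

    /* heap allocation sized by the actual CPU topology, instead of
     * `u16 channels[NR_CPUS]` on the stack
     */
    channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
    if (!channels)
        return -ENOMEM;

    for_each_online_cpu(cpu)
        channels[num_portals++] = get_channel(cpu);

    /* ... program channels[0..num_portals - 1] into hardware ... */

    kfree(channels);
    return 0;
}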
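The dpaa_set_rx_mode() hunk replaces the driver-private set_multi() callback with the core __dev_mc_sync() helper, which tracks the device's multicast list and invokes the driver only for addresses added or removed since the previous call, rather than reprogramming the whole filter every time. A minimal sketch of the callback pair, with hypothetical example_* names:

#include <linux/netdevice.h>

static int example_mc_sync(struct net_device *net_dev, const u8 *addr)
{
    /* add one address to the hardware hash/exact-match filter */
    return 0;
}

static int example_mc_unsync(struct net_device *net_dev, const u8 *addr)
{
    /* remove one address from the hardware filter */
    return 0;
}

static void example_set_rx_mode(struct net_device *net_dev)
{
    /* the core calls the pair only for list deltas; a negative
     * return value is reported by the caller, as in the diff above
     */
    __dev_mc_sync(net_dev, example_mc_sync, example_mc_unsync);
}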
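The __skb_put_padto() call added to dpaa_start_xmit() extends frames shorter than ETH_ZLEN (60 bytes) with zeroes before the frame descriptor is built, so hardware padding can never transmit stale memory past skb->len. A minimal sketch of the same call in a hypothetical ndo_start_xmit; with free_on_error=false the skb survives a failed reallocation and the caller's own error path frees it:

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
    /* pad with zeroes up to the minimum Ethernet frame size */
    if (__skb_put_padto(skb, ETH_ZLEN, false)) {
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }

    /* ... map skb->data over skb->len (now >= ETH_ZLEN) bytes ... */
    return NETDEV_TX_OK;
}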
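The dpaa_get_strings() rewrite relies on ethtool_sprintf()/ethtool_puts(), which each write one ETH_GSTRING_LEN slot and advance the data cursor, replacing the manual snprintf() + memcpy() + pointer arithmetic of the old code. A minimal, hypothetical illustration (example_get_strings() and the stat names are made up):

#include <linux/ethtool.h>

static void example_get_strings(struct net_device *dev, u32 stringset,
                                u8 *data)
{
    int i;

    if (stringset != ETH_SS_STATS)
        return;

    /* each call fills one ETH_GSTRING_LEN entry and bumps data */
    for (i = 0; i < 4; i++)
        ethtool_sprintf(&data, "rxq%d_packets", i);
    ethtool_puts(&data, "rx_total_packets");
}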