Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_ethtool.c')
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 254
1 file changed, 147 insertions(+), 107 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1175..9bdb309b668e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -2,6 +2,7 @@
 /* Copyright (C) 2023 Intel Corporation */
 
 #include "idpf.h"
+#include "idpf_ptp.h"
 
 /**
  * idpf_get_rxnfc - command to get RX flow classification rules
@@ -222,14 +223,19 @@ static int idpf_set_channels(struct net_device *netdev,
 			     struct ethtool_channels *ch)
 {
 	struct idpf_vport_config *vport_config;
-	u16 combined, num_txq, num_rxq;
 	unsigned int num_req_tx_q;
 	unsigned int num_req_rx_q;
 	struct idpf_vport *vport;
+	u16 num_txq, num_rxq;
 	struct device *dev;
 	int err = 0;
 	u16 idx;
 
+	if (ch->rx_count && ch->tx_count) {
+		netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+		return -EINVAL;
+	}
+
 	idpf_vport_ctrl_lock(netdev);
 	vport = idpf_netdev_to_vport(netdev);
 
@@ -239,20 +245,6 @@ static int idpf_set_channels(struct net_device *netdev,
 	num_txq = vport_config->user_config.num_req_tx_qs;
 	num_rxq = vport_config->user_config.num_req_rx_qs;
 
-	combined = min(num_txq, num_rxq);
-
-	/* these checks are for cases where user didn't specify a particular
-	 * value on cmd line but we get non-zero value anyway via
-	 * get_channels(); look at ethtool.c in ethtool repository (the user
-	 * space part), particularly, do_schannels() routine
-	 */
-	if (ch->combined_count == combined)
-		ch->combined_count = 0;
-	if (ch->combined_count && ch->rx_count == num_rxq - combined)
-		ch->rx_count = 0;
-	if (ch->combined_count && ch->tx_count == num_txq - combined)
-		ch->tx_count = 0;
-
 	num_req_tx_q = ch->combined_count + ch->tx_count;
 	num_req_rx_q = ch->combined_count + ch->rx_count;
 
@@ -376,7 +368,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
 			    new_tx_count);
 
 	if (new_tx_count == vport->txq_desc_count &&
-	    new_rx_count == vport->rxq_desc_count)
+	    new_rx_count == vport->rxq_desc_count &&
+	    kring->tcp_data_split == idpf_vport_get_hsplit(vport))
 		goto unlock_mutex;
 
 	if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
@@ -445,22 +438,24 @@ struct idpf_stats {
 	.stat_offset = offsetof(_type, _stat) \
 }
 
-/* Helper macro for defining some statistics related to queues */
-#define IDPF_QUEUE_STAT(_name, _stat) \
-	IDPF_STAT(struct idpf_queue, _name, _stat)
+/* Helper macros for defining some statistics related to queues */
+#define IDPF_RX_QUEUE_STAT(_name, _stat) \
+	IDPF_STAT(struct idpf_rx_queue, _name, _stat)
+#define IDPF_TX_QUEUE_STAT(_name, _stat) \
+	IDPF_STAT(struct idpf_tx_queue, _name, _stat)
 
 /* Stats associated with a Tx queue */
 static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
-	IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
-	IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
-	IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+	IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
+	IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
+	IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
 };
 
 /* Stats associated with an Rx queue */
 static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
-	IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
-	IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
-	IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+	IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
+	IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
+	IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
 };
 
 #define IDPF_TX_QUEUE_STATS_LEN		ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@@ -571,8 +566,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
 	for (i = 0; i < vport_config->max_q.max_rxq; i++)
 		idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
 				       "rx", i);
-
-	page_pool_ethtool_stats_get_strings(data);
 }
 
 /**
@@ -606,7 +599,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
 	struct idpf_netdev_priv *np = netdev_priv(netdev);
 	struct idpf_vport_config *vport_config;
 	u16 max_txq, max_rxq;
-	unsigned int size;
 
 	if (sset != ETH_SS_STATS)
 		return -EINVAL;
@@ -625,11 +617,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
 	max_txq = vport_config->max_q.max_txq;
 	max_rxq = vport_config->max_q.max_rxq;
 
-	size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+	return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
 	       (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
-	size += page_pool_ethtool_stats_get_count();
-
-	return size;
 }
 
 /**
@@ -641,7 +630,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
  * Copies the stat data defined by the pointer and stat structure pair into
  * the memory supplied as data. If the pointer is null, data will be zero'd.
  */
-static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
 				      const struct idpf_stats *stat)
 {
 	char *p;
@@ -679,6 +668,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
  * idpf_add_queue_stats - copy queue statistics into supplied buffer
  * @data: ethtool stats buffer
  * @q: the queue to copy
+ * @type: type of the queue
  *
  * Queue statistics must be copied while protected by u64_stats_fetch_begin,
 * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@@ -689,19 +679,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
 *
 * This function expects to be called while under rcu_read_lock().
 */
-static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+static void idpf_add_queue_stats(u64 **data, const void *q,
+				 enum virtchnl2_queue_type type)
 {
+	const struct u64_stats_sync *stats_sync;
 	const struct idpf_stats *stats;
 	unsigned int start;
 	unsigned int size;
 	unsigned int i;
 
-	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
 		size = IDPF_RX_QUEUE_STATS_LEN;
 		stats = idpf_gstrings_rx_queue_stats;
+		stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
 	} else {
 		size = IDPF_TX_QUEUE_STATS_LEN;
 		stats = idpf_gstrings_tx_queue_stats;
+		stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
 	}
 
 	/* To avoid invalid statistics values, ensure that we keep retrying
@@ -709,10 +703,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
 	 * u64_stats_fetch_retry.
 	 */
 	do {
-		start = u64_stats_fetch_begin(&q->stats_sync);
+		start = u64_stats_fetch_begin(stats_sync);
 		for (i = 0; i < size; i++)
 			idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
-	} while (u64_stats_fetch_retry(&q->stats_sync, start));
+	} while (u64_stats_fetch_retry(stats_sync, start));
 
 	/* Once we successfully copy the stats in, update the data pointer */
 	*data += size;
@@ -801,7 +795,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
 		for (j = 0; j < num_rxq; j++) {
 			u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
 			struct idpf_rx_queue_stats *stats;
-			struct idpf_queue *rxq;
+			struct idpf_rx_queue *rxq;
 			unsigned int start;
 
 			if (idpf_is_queue_model_split(vport->rxq_model))
@@ -815,7 +809,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
 			do {
 				start = u64_stats_fetch_begin(&rxq->stats_sync);
 
-				stats = &rxq->q_stats.rx;
+				stats = &rxq->q_stats;
 				hw_csum_err = u64_stats_read(&stats->hw_csum_err);
 				hsplit = u64_stats_read(&stats->hsplit_pkts);
 				hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@@ -836,7 +830,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
 
 		for (j = 0; j < txq_grp->num_txq; j++) {
 			u64 linearize, qbusy, skb_drops, dma_map_errs;
-			struct idpf_queue *txq = txq_grp->txqs[j];
+			struct idpf_tx_queue *txq = txq_grp->txqs[j];
 			struct idpf_tx_queue_stats *stats;
 			unsigned int start;
 
@@ -846,7 +840,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
 			do {
 				start = u64_stats_fetch_begin(&txq->stats_sync);
 
-				stats = &txq->q_stats.tx;
+				stats = &txq->q_stats;
 				linearize = u64_stats_read(&stats->linearize);
 				qbusy = u64_stats_read(&stats->q_busy);
 				skb_drops = u64_stats_read(&stats->skb_drops);
@@ -877,7 +871,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 {
 	struct idpf_netdev_priv *np = netdev_priv(netdev);
 	struct idpf_vport_config *vport_config;
-	struct page_pool_stats pp_stats = { };
 	struct idpf_vport *vport;
 	unsigned int total = 0;
 	unsigned int i, j;
@@ -904,12 +897,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 		qtype = VIRTCHNL2_QUEUE_TYPE_TX;
 
 		for (j = 0; j < txq_grp->num_txq; j++, total++) {
-			struct idpf_queue *txq = txq_grp->txqs[j];
+			struct idpf_tx_queue *txq = txq_grp->txqs[j];
 
 			if (!txq)
 				idpf_add_empty_queue_stats(&data, qtype);
 			else
-				idpf_add_queue_stats(&data, txq);
+				idpf_add_queue_stats(&data, txq, qtype);
 		}
 	}
 
@@ -937,7 +930,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 			num_rxq = rxq_grp->singleq.num_rxq;
 
 		for (j = 0; j < num_rxq; j++, total++) {
-			struct idpf_queue *rxq;
+			struct idpf_rx_queue *rxq;
 
 			if (is_splitq)
 				rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@@ -946,93 +939,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
 			if (!rxq)
 				idpf_add_empty_queue_stats(&data, qtype);
 			else
-				idpf_add_queue_stats(&data, rxq);
-
-			/* In splitq mode, don't get page pool stats here since
-			 * the pools are attached to the buffer queues
-			 */
-			if (is_splitq)
-				continue;
-
-			if (rxq)
-				page_pool_get_stats(rxq->pp, &pp_stats);
-		}
-	}
-
-	for (i = 0; i < vport->num_rxq_grp; i++) {
-		for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
-			struct idpf_queue *rxbufq =
-				&vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
-			page_pool_get_stats(rxbufq->pp, &pp_stats);
+				idpf_add_queue_stats(&data, rxq, qtype);
 		}
 	}
 
 	for (; total < vport_config->max_q.max_rxq; total++)
 		idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
 
-	page_pool_ethtool_stats_get(data, &pp_stats);
-
 	rcu_read_unlock();
 
 	idpf_vport_ctrl_unlock(netdev);
 }
 
 /**
- * idpf_find_rxq - find rxq from q index
+ * idpf_find_rxq_vec - find rxq vector from q index
  * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
- * returns pointer to rx queue
+ * returns pointer to rx vector
 */
-static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+					       int q_num)
 {
 	int q_grp, q_idx;
 
 	if (!idpf_is_queue_model_split(vport->rxq_model))
-		return vport->rxq_grps->singleq.rxqs[q_num];
+		return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
 
 	q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 	q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
 
-	return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+	return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
 }
 
 /**
- * idpf_find_txq - find txq from q index
+ * idpf_find_txq_vec - find txq vector from q index
 * @vport: virtual port associated to queue
 * @q_num: q index used to find queue
 *
- * returns pointer to tx queue
+ * returns pointer to tx vector
 */
-static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+					       int q_num)
 {
 	int q_grp;
 
 	if (!idpf_is_queue_model_split(vport->txq_model))
-		return vport->txqs[q_num];
+		return vport->txqs[q_num]->q_vector;
 
 	q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
 
-	return vport->txq_grps[q_grp].complq;
+	return vport->txq_grps[q_grp].complq->q_vector;
 }
 
 /**
 * __idpf_get_q_coalesce - get ITR values for specific queue
 * @ec: ethtool structure to fill with driver's coalesce settings
- * @q: quuee of Rx or Tx
+ * @q_vector: queue vector corresponding to this queue
+ * @type: queue type
 */
 static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
-				  struct idpf_queue *q)
+				  const struct idpf_q_vector *q_vector,
+				  enum virtchnl2_queue_type type)
 {
-	if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+	if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
 		ec->use_adaptive_rx_coalesce =
-				IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
-		ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+				IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
+		ec->rx_coalesce_usecs = q_vector->rx_itr_value;
 	} else {
 		ec->use_adaptive_tx_coalesce =
-				IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
-		ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+				IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
+		ec->tx_coalesce_usecs = q_vector->tx_itr_value;
 	}
 }
 
@@ -1048,8 +1025,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
 			       struct ethtool_coalesce *ec,
 			       u32 q_num)
 {
-	struct idpf_netdev_priv *np = netdev_priv(netdev);
-	struct idpf_vport *vport;
+	const struct idpf_netdev_priv *np = netdev_priv(netdev);
+	const struct idpf_vport *vport;
 	int err = 0;
 
 	idpf_vport_ctrl_lock(netdev);
@@ -1064,10 +1041,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
 	}
 
 	if (q_num < vport->num_rxq)
-		__idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+		__idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
+				      VIRTCHNL2_QUEUE_TYPE_RX);
 
 	if (q_num < vport->num_txq)
-		__idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+		__idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
+				      VIRTCHNL2_QUEUE_TYPE_TX);
 
 unlock_mutex:
 	idpf_vport_ctrl_unlock(netdev);
@@ -1111,16 +1090,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
 
 /**
 * __idpf_set_q_coalesce - set ITR values for specific queue
 * @ec: ethtool structure from user to update ITR settings
- * @q: queue for which itr values has to be set
+ * @qv: queue vector for which itr values has to be set
 * @is_rxq: is queue type rx
 *
 * Returns 0 on success, negative otherwise.
 */
-static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
-				 struct idpf_queue *q, bool is_rxq)
+static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+				 struct idpf_q_vector *qv, bool is_rxq)
 {
 	u32 use_adaptive_coalesce, coalesce_usecs;
-	struct idpf_q_vector *qv = q->q_vector;
 	bool is_dim_ena = false;
 	u16 itr_val;
 
@@ -1136,7 +1114,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
 		itr_val = qv->tx_itr_value;
 	}
 	if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
-		netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+		netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
 		return -EINVAL;
 	}
 
@@ -1145,7 +1123,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
 		return 0;
 
 	if (coalesce_usecs > IDPF_ITR_MAX) {
-		netdev_err(q->vport->netdev,
+		netdev_err(qv->vport->netdev,
 			   "Invalid value, %d-usecs range is 0-%d\n",
 			   coalesce_usecs, IDPF_ITR_MAX);
 
@@ -1154,7 +1132,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
 
 	if (coalesce_usecs % 2) {
 		coalesce_usecs--;
-		netdev_info(q->vport->netdev,
+		netdev_info(qv->vport->netdev,
 			    "HW only supports even ITR values, ITR rounded to %d\n",
 			    coalesce_usecs);
 	}
@@ -1193,15 +1171,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
 *
 * Return 0 on success, and negative on failure
 */
-static int idpf_set_q_coalesce(struct idpf_vport *vport,
-			       struct ethtool_coalesce *ec,
+static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+			       const struct ethtool_coalesce *ec,
 			       int q_num, bool is_rxq)
 {
-	struct idpf_queue *q;
+	struct idpf_q_vector *qv;
 
-	q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+	qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+		      idpf_find_txq_vec(vport, q_num);
 
-	if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+	if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
 		return -EINVAL;
 
 	return 0;
@@ -1318,27 +1297,87 @@ static void idpf_set_msglevel(struct net_device *netdev, u32 data)
 static int idpf_get_link_ksettings(struct net_device *netdev,
 				   struct ethtool_link_ksettings *cmd)
 {
-	struct idpf_vport *vport;
-
-	idpf_vport_ctrl_lock(netdev);
-	vport = idpf_netdev_to_vport(netdev);
+	struct idpf_netdev_priv *np = netdev_priv(netdev);
 
 	ethtool_link_ksettings_zero_link_mode(cmd, supported);
 	cmd->base.autoneg = AUTONEG_DISABLE;
 	cmd->base.port = PORT_NONE;
-	if (vport->link_up) {
+	if (netif_carrier_ok(netdev)) {
 		cmd->base.duplex = DUPLEX_FULL;
-		cmd->base.speed = vport->link_speed_mbps;
+		cmd->base.speed = np->link_speed_mbps;
 	} else {
 		cmd->base.duplex = DUPLEX_UNKNOWN;
 		cmd->base.speed = SPEED_UNKNOWN;
 	}
 
-	idpf_vport_ctrl_unlock(netdev);
-
 	return 0;
 }
 
+/**
+ * idpf_get_timestamp_filters - Get the supported timestamping mode
+ * @vport: Virtual port structure
+ * @info: ethtool timestamping info structure
+ *
+ * Get the Tx/Rx timestamp filters.
+ */
+static void idpf_get_timestamp_filters(const struct idpf_vport *vport,
+				       struct kernel_ethtool_ts_info *info)
+{
+	info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE |
+				SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->tx_types = BIT(HWTSTAMP_TX_OFF);
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+	if (!vport->tx_tstamp_caps ||
+	    vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE)
+		return;
+
+	info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+				 SOF_TIMESTAMPING_TX_HARDWARE;
+
+	info->tx_types |= BIT(HWTSTAMP_TX_ON);
+}
+
+/**
+ * idpf_get_ts_info - Get device PHC association
+ * @netdev: network interface device structure
+ * @info: ethtool timestamping info structure
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int idpf_get_ts_info(struct net_device *netdev,
+			    struct kernel_ethtool_ts_info *info)
+{
+	struct idpf_netdev_priv *np = netdev_priv(netdev);
+	struct idpf_vport *vport;
+	int err = 0;
+
+	if (!mutex_trylock(&np->adapter->vport_ctrl_lock))
+		return -EBUSY;
+
+	vport = idpf_netdev_to_vport(netdev);
+
+	if (!vport->adapter->ptp) {
+		err = -EOPNOTSUPP;
+		goto unlock;
+	}
+
+	if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) &&
+	    vport->adapter->ptp->clock) {
+		info->phc_index = ptp_clock_index(vport->adapter->ptp->clock);
+		idpf_get_timestamp_filters(vport, info);
+	} else {
+		pci_dbg(vport->adapter->pdev, "PTP clock not detected\n");
+		err = ethtool_op_get_ts_info(netdev, info);
+	}
+
+unlock:
+	mutex_unlock(&np->adapter->vport_ctrl_lock);
+
+	return err;
+}
+
 static const struct ethtool_ops idpf_ethtool_ops = {
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
 				     ETHTOOL_COALESCE_USE_ADAPTIVE,
@@ -1363,6 +1402,7 @@ static const struct ethtool_ops idpf_ethtool_ops = {
 	.get_ringparam		= idpf_get_ringparam,
 	.set_ringparam		= idpf_set_ringparam,
 	.get_link_ksettings	= idpf_get_link_ksettings,
+	.get_ts_info		= idpf_get_ts_info,
 };
 
 /**
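Note: the reworked idpf_add_queue_stats() above keeps the kernel's u64_stats_fetch_begin()/u64_stats_fetch_retry() reader loop and only swaps in the stats_sync member that matches the queue type. Below is a minimal userspace sketch of that seqcount-retry idea, written with C11 atomics rather than the kernel's u64_stats API; the struct layout and helper names are illustrative assumptions, not the driver's code, and the memory-ordering details the kernel helpers handle are deliberately simplified.

/* Userspace sketch of the seqcount-retry pattern relied on by
 * idpf_add_queue_stats().  Illustration only: single writer assumed,
 * and barrier details are simplified compared to the kernel helpers.
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct q_stats {
	atomic_uint seq;	/* even: stable, odd: writer in progress */
	uint64_t packets;
	uint64_t bytes;
};

/* Writer: make seq odd, update the counters, make seq even again. */
static void stats_update(struct q_stats *s, uint64_t pkts, uint64_t len)
{
	atomic_fetch_add(&s->seq, 1);
	s->packets += pkts;
	s->bytes += len;
	atomic_fetch_add(&s->seq, 1);
}

/* Reader: retry until a snapshot was taken with a stable, even seq. */
static void stats_read(struct q_stats *s, uint64_t *pkts, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct q_stats s = { 0 };
	uint64_t pkts, bytes;

	stats_update(&s, 1, 1500);
	stats_read(&s, &pkts, &bytes);
	printf("pkts=%" PRIu64 " bytes=%" PRIu64 "\n", pkts, bytes);
	return 0;
}

The reader retries whenever the sequence was odd (a writer mid-update) or changed while the fields were being copied, which is why the driver copies each queue's counters inside the begin/retry pair instead of reading them directly.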