Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_tx_dqo.c')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_tx_dqo.c	109
1 file changed, 56 insertions, 53 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a3e..a27f1574a733 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 	struct device *hdev = &priv->pdev->dev;
 	int idx = tx->q_num;
 	size_t bytes;
+	u32 qpl_id;
 
 	if (tx->q_resources) {
 		dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -237,7 +238,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
 	tx->dqo.tx_qpl_buf_next = NULL;
 
 	if (tx->dqo.qpl) {
-		gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
+		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+		gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
 		tx->dqo.qpl = NULL;
 	}
 
@@ -285,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 {
 	struct device *hdev = &priv->pdev->dev;
 	int num_pending_packets;
+	int qpl_page_cnt;
 	size_t bytes;
+	u32 qpl_id;
 	int i;
 
 	memset(tx, 0, sizeof(*tx));
@@ -295,9 +299,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 
 	/* Queue sizes must be a power of 2 */
 	tx->mask = cfg->ring_size - 1;
-	tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
-		priv->options_dqo_rda.tx_comp_ring_entries - 1 :
-		tx->mask;
+	tx->dqo.complq_mask = tx->mask;
 
 	/* The max number of pending packets determines the maximum number of
 	 * descriptors which maybe written to the completion queue.
@@ -354,7 +356,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
 		goto err;
 
 	if (!cfg->raw_addressing) {
-		tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
+		qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+		qpl_page_cnt = priv->tx_pages_per_qpl;
+
+		tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+							qpl_page_cnt);
 		if (!tx->dqo.qpl)
 			goto err;
 
@@ -373,33 +379,23 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
 			   struct gve_tx_alloc_rings_cfg *cfg)
 {
 	struct gve_tx_ring *tx = cfg->tx;
+	int total_queues;
 	int err = 0;
 	int i, j;
 
-	if (!cfg->raw_addressing && !cfg->qpls) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc QPL ring before allocing QPLs\n");
-		return -EINVAL;
-	}
-
-	if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
+	total_queues = cfg->qcfg->num_queues + cfg->num_xdp_rings;
+	if (total_queues > cfg->qcfg->max_queues) {
 		netif_err(priv, drv, priv->dev,
 			  "Cannot alloc more than the max num of Tx rings\n");
 		return -EINVAL;
 	}
 
-	if (cfg->start_idx == 0) {
-		tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
-			      GFP_KERNEL);
-		if (!tx)
-			return -ENOMEM;
-	} else if (!tx) {
-		netif_err(priv, drv, priv->dev,
-			  "Cannot alloc tx rings from a nonzero start idx without tx array\n");
-		return -EINVAL;
-	}
+	tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
+		      GFP_KERNEL);
+	if (!tx)
+		return -ENOMEM;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
+	for (i = 0; i < total_queues; i++) {
 		err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
 		if (err) {
 			netif_err(priv, drv, priv->dev,
@@ -415,8 +411,7 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
 err:
 	for (j = 0; j < i; j++)
 		gve_tx_free_ring_dqo(priv, &tx[j], cfg);
-	if (cfg->start_idx == 0)
-		kvfree(tx);
+	kvfree(tx);
 	return err;
 }
 
@@ -429,13 +424,11 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
 	if (!tx)
 		return;
 
-	for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
+	for (i = 0; i < cfg->qcfg->num_queues + cfg->qcfg->num_xdp_queues; i++)
 		gve_tx_free_ring_dqo(priv, &tx[i], cfg);
 
-	if (cfg->start_idx == 0) {
-		kvfree(tx);
-		cfg->tx = NULL;
-	}
+	kvfree(tx);
+	cfg->tx = NULL;
 }
 
 /* Returns the number of slots available in the ring */
@@ -555,28 +548,18 @@ static int gve_prep_tso(struct sk_buff *skb)
 	if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
 		return -1;
 
+	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+		return -EINVAL;
+
 	/* Needed because we will modify header. */
 	err = skb_cow_head(skb, 0);
 	if (err < 0)
 		return err;
 
 	tcp = tcp_hdr(skb);
-
-	/* Remove payload length from checksum. */
 	paylen = skb->len - skb_transport_offset(skb);
-
-	switch (skb_shinfo(skb)->gso_type) {
-	case SKB_GSO_TCPV4:
-	case SKB_GSO_TCPV6:
-		csum_replace_by_diff(&tcp->check,
-				     (__force __wsum)htonl(paylen));
-
-		/* Compute length of segmentation header. */
-		header_len = skb_tcp_all_headers(skb);
-		break;
-	default:
-		return -EINVAL;
-	}
+	csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+	header_len = skb_tcp_all_headers(skb);
 
 	if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
 		return -EINVAL;
@@ -677,7 +660,8 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
 			goto err;
 
 		dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
-		dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), pkt,
+					  dma[pkt->num_bufs], addr);
 		++pkt->num_bufs;
 
 		gve_tx_fill_pkt_desc_dqo(tx, desc_idx, skb, len, addr,
@@ -876,22 +860,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
 	const int header_len = skb_tcp_all_headers(skb);
 	const int gso_size = shinfo->gso_size;
 	int cur_seg_num_bufs;
+	int prev_frag_size;
 	int cur_seg_size;
 	int i;
 
 	cur_seg_size = skb_headlen(skb) - header_len;
+	prev_frag_size = skb_headlen(skb);
 	cur_seg_num_bufs = cur_seg_size > 0;
 
 	for (i = 0; i < shinfo->nr_frags; i++) {
 		if (cur_seg_size >= gso_size) {
 			cur_seg_size %= gso_size;
 			cur_seg_num_bufs = cur_seg_size > 0;
+
+			if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+				int prev_frag_remain = prev_frag_size %
+					GVE_TX_MAX_BUF_SIZE_DQO;
+
+				/* If the last descriptor of the previous frag
+				 * is less than cur_seg_size, the segment will
+				 * span two descriptors in the previous frag.
+				 * Since max gso size (9728) is less than
+				 * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+				 * for the segment to span more than two
+				 * descriptors.
+				 */
+				if (prev_frag_remain &&
+				    cur_seg_size > prev_frag_remain)
+					cur_seg_num_bufs++;
+			}
 		}
 
 		if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
			return false;
 
-		cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+		prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+		cur_seg_size += prev_frag_size;
 	}
 
 	return true;
@@ -1035,8 +1039,9 @@ static void gve_unmap_packet(struct device *dev,
 	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
 			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
 	for (i = 1; i < pkt->num_bufs; i++) {
-		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
-			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
+		netmem_dma_unmap_page_attrs(dev, dma_unmap_addr(pkt, dma[i]),
+					    dma_unmap_len(pkt, len[i]),
+					    DMA_TO_DEVICE, 0);
 	}
 	pkt->num_bufs = 0;
 }
@@ -1136,8 +1141,7 @@ static void gve_handle_miss_completion(struct gve_priv *priv,
 	/* jiffies can wraparound but time comparisons can handle overflows. */
 	pending_packet->timeout_jiffies =
 			jiffies +
-			msecs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT *
-					 MSEC_PER_SEC);
+			secs_to_jiffies(GVE_REINJECT_COMPL_TIMEOUT);
 	add_to_list(tx, &tx->dqo_compl.miss_completions, pending_packet);
 
 	*bytes += pending_packet->skb->len;
@@ -1181,8 +1185,7 @@ static void remove_miss_completions(struct gve_priv *priv,
 		pending_packet->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
 		pending_packet->timeout_jiffies =
 				jiffies +
-				msecs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT *
-						 MSEC_PER_SEC);
+				secs_to_jiffies(GVE_DEALLOCATE_COMPL_TIMEOUT);
 		/* Maintain pending packet in another list so the packet can be
 		 * unallocated at a later time.
 		 */
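
The descriptor-counting hunk in gve_can_send_tso() above is easiest to follow outside the driver. Below is a minimal userspace sketch of the same loop with plain ints standing in for skb state; the MAX_BUF_SIZE value and the max_bufs_per_seg limit are illustrative assumptions, not values quoted from the gve headers.

/* Standalone sketch of the gve_can_send_tso() counting logic. A frag
 * larger than the per-descriptor limit is posted as max-size chunks plus
 * a short tail, so a TSO segment whose carried-over bytes exceed that
 * tail costs one extra descriptor. All numbers here are examples.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_BUF_SIZE 16383	/* stand-in for GVE_TX_MAX_BUF_SIZE_DQO */

static bool can_send_tso(const int *frags, int nr_frags, int headlen,
			 int header_len, int gso_size, int max_bufs_per_seg)
{
	int cur_seg_size = headlen - header_len;
	int prev_frag_size = headlen;
	int cur_seg_num_bufs = cur_seg_size > 0;
	int i;

	for (i = 0; i < nr_frags; i++) {
		if (cur_seg_size >= gso_size) {
			cur_seg_size %= gso_size;
			cur_seg_num_bufs = cur_seg_size > 0;

			if (prev_frag_size > MAX_BUF_SIZE) {
				int tail = prev_frag_size % MAX_BUF_SIZE;

				/* Carried-over bytes straddle the short tail
				 * chunk of the previous frag: one extra buffer.
				 */
				if (tail && cur_seg_size > tail)
					cur_seg_num_bufs++;
			}
		}
		if (++cur_seg_num_bufs > max_bufs_per_seg)
			return false;
		prev_frag_size = frags[i];
		cur_seg_size += prev_frag_size;
	}
	return true;
}

int main(void)
{
	/* 100B of headers, then a 17383B frag whose tail chunk is 1000B:
	 * the segment carrying 8383B into the next frag spans two
	 * descriptors of the first frag, so it needs three buffers total.
	 */
	int frags[] = { 17383, 9000 };

	printf("%d\n", can_send_tso(frags, 2, 100, 100, 9000, 3));
	return 0;
}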
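
The two timeout hunks are mechanical: msecs_to_jiffies(t * MSEC_PER_SEC) and secs_to_jiffies(t) yield the same tick count, the latter without the detour through milliseconds. A rough userspace model, with hypothetical stand-ins for the kernel helpers and an example HZ:

#include <stdio.h>

#define HZ 250			/* example CONFIG_HZ */
#define MSEC_PER_SEC 1000UL

/* Simplified models of the kernel helpers (the real msecs_to_jiffies
 * also rounds up and clamps); enough to show the refactor preserves
 * the computed value for whole-second timeouts.
 */
static unsigned long msecs_to_jiffies(unsigned long ms)
{
	return ms * HZ / MSEC_PER_SEC;
}

static unsigned long secs_to_jiffies(unsigned long s)
{
	return s * HZ;
}

int main(void)
{
	unsigned long t = 4;	/* hypothetical timeout in seconds */

	printf("%lu == %lu\n", msecs_to_jiffies(t * MSEC_PER_SEC),
	       secs_to_jiffies(t));
	return 0;
}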