author		Catherine Sullivan <csully@google.com>	2020-12-07 14:45:23 -0800
committer	David S. Miller <davem@davemloft.net>	2020-12-08 16:06:28 -0800
commit		4944db80ac1133fe4acb0c9756758da088338b26 (patch)
tree		5aa0f3ab319fc725f969d6fe5df14d332b2f9f8e /drivers/net/ethernet/google/gve/gve_adminq.c
parent		8354bcbebd26325c9efeb6682a3e14fbbc8ace95 (diff)
gve: Add support for raw addressing device option
Add support to the describe-device path for parsing device options. As the first device option, add raw addressing.

"Raw Addressing" mode (as opposed to the current "qpl" mode) is an operational mode which allows the driver to avoid the bounce-buffer copies it currently performs using pre-allocated qpls (queue_page_lists) when sending and receiving packets. For egress packets, the provided skb data addresses will be dma_map'ed and passed to the device, allowing the NIC to perform DMA directly - the driver will not have to copy the buffer content into pre-allocated buffers/qpls (as in qpl mode). For ingress packets, copies are also eliminated as buffers are handed to the networking stack and then recycled or re-allocated as necessary, avoiding the use of skb_copy_to_linear_data().

This patch only introduces the option to the driver; subsequent patches will add the ingress and egress functionality.

Reviewed-by: Yangchun Fu <yangchun@google.com>
Signed-off-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David Awogbemila <awogbemila@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
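To illustrate the egress difference described above, the following is a minimal C sketch, not part of this patch: the example_tx_* helpers and the qpl bounce-buffer argument are hypothetical placeholders, while skb_copy_bits(), skb_headlen(), dma_map_single() and dma_mapping_error() are standard kernel APIs. The real queue handling lives in gve_tx.c and is added by later patches in this series.

/* Illustrative sketch only -- contrasts the two egress paths the commit
 * message describes. Function names and the qpl buffer layout are invented
 * for illustration.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* qpl mode: copy the skb payload into a pre-allocated bounce buffer that
 * was already mapped for the device when the queue was created.
 */
static void example_tx_qpl_mode(void *qpl_bounce_buf, struct sk_buff *skb)
{
	skb_copy_bits(skb, 0, qpl_bounce_buf, skb_headlen(skb));
	/* ... post a descriptor pointing at the qpl buffer's offset ... */
}

/* Raw addressing mode: map the skb's own linear data and hand that DMA
 * address straight to the NIC, so no per-packet copy is needed.
 */
static int example_tx_raw_addressing(struct device *dev, struct sk_buff *skb,
				     dma_addr_t *addr)
{
	*addr = dma_map_single(dev, skb->data, skb_headlen(skb),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -ENOMEM;
	/* ... post a descriptor carrying *addr; unmap on completion ... */
	return 0;
}

In raw addressing mode the descriptor carries the DMA address of the skb's own memory, so the per-packet cost is the mapping (and a later unmap on completion) rather than a full payload copy.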
Diffstat (limited to 'drivers/net/ethernet/google/gve/gve_adminq.c')
-rw-r--r--	drivers/net/ethernet/google/gve/gve_adminq.c	71
1 file changed, 71 insertions, 0 deletions
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 24ae6a28a806..608a8806f8c8 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -14,6 +14,57 @@
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100
+#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
+"Expected: length=%d, feature_mask=%x.\n" \
+"Actual: length=%d, feature_mask=%x.\n"
+
+static
+struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
+ struct gve_device_option *option)
+{
+ void *option_end, *descriptor_end;
+
+ option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
+ descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);
+
+ return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
+}
+
+static
+void gve_parse_device_option(struct gve_priv *priv,
+ struct gve_device_descriptor *device_descriptor,
+ struct gve_device_option *option)
+{
+ u16 option_length = be16_to_cpu(option->option_length);
+ u16 option_id = be16_to_cpu(option->option_id);
+
+ switch (option_id) {
+ case GVE_DEV_OPT_ID_RAW_ADDRESSING:
+ /* If the length or feature mask doesn't match,
+ * continue without enabling the feature.
+ */
+ if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
+ option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
+ GVE_DEV_OPT_LEN_RAW_ADDRESSING,
+ cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
+ option_length, option->feat_mask);
+ priv->raw_addressing = 0;
+ } else {
+ dev_info(&priv->pdev->dev,
+ "Raw addressing device option enabled.\n");
+ priv->raw_addressing = 1;
+ }
+ break;
+ default:
+ /* If we don't recognize the option just continue
+ * without doing anything.
+ */
+ dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
+ option_id);
+ }
+}
+
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -460,11 +511,14 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_descriptor *descriptor;
+ struct gve_device_option *dev_opt;
union gve_adminq_command cmd;
dma_addr_t descriptor_bus;
+ u16 num_options;
int err = 0;
u8 *mac;
u16 mtu;
+ int i;
memset(&cmd, 0, sizeof(cmd));
descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -518,6 +572,23 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->rx_desc_cnt = priv->rx_pages_per_qpl;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+ dev_opt = (void *)(descriptor + 1);
+
+ num_options = be16_to_cpu(descriptor->num_device_options);
+ for (i = 0; i < num_options; i++) {
+ struct gve_device_option *next_opt;
+
+ next_opt = gve_get_next_option(descriptor, dev_opt);
+ if (!next_opt) {
+ dev_err(&priv->dev->dev,
+ "options exceed device_descriptor's total length.\n");
+ err = -EINVAL;
+ goto free_device_descriptor;
+ }
+
+ gve_parse_device_option(priv, descriptor, dev_opt);
+ dev_opt = next_opt;
+ }
free_device_descriptor:
dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,