Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_virtchnl.c')
-rw-r--r-- | drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 315 |
1 file changed, 306 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 24febaaa8fbb..a028c69f7fdc 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (C) 2023 Intel Corporation */
 
+#include <linux/export.h>
 #include <net/libeth/rx.h>
 
 #include "idpf.h"
@@ -849,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
 			    VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
 
 	caps.rss_caps =
-		cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
-			    VIRTCHNL2_CAP_RSS_IPV4_UDP |
-			    VIRTCHNL2_CAP_RSS_IPV4_SCTP |
-			    VIRTCHNL2_CAP_RSS_IPV4_OTHER |
-			    VIRTCHNL2_CAP_RSS_IPV6_TCP |
-			    VIRTCHNL2_CAP_RSS_IPV6_UDP |
-			    VIRTCHNL2_CAP_RSS_IPV6_SCTP |
-			    VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+		cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP |
+			    VIRTCHNL2_FLOW_IPV4_UDP |
+			    VIRTCHNL2_FLOW_IPV4_SCTP |
+			    VIRTCHNL2_FLOW_IPV4_OTHER |
+			    VIRTCHNL2_FLOW_IPV6_TCP |
+			    VIRTCHNL2_FLOW_IPV6_UDP |
+			    VIRTCHNL2_FLOW_IPV6_SCTP |
+			    VIRTCHNL2_FLOW_IPV6_OTHER);
 
 	caps.hsplit_caps =
 		cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
@@ -868,6 +869,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
 
 	caps.other_caps =
 		cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+			    VIRTCHNL2_CAP_RDMA |
+			    VIRTCHNL2_CAP_LAN_MEMORY_REGIONS |
 			    VIRTCHNL2_CAP_MACFILTER |
 			    VIRTCHNL2_CAP_SPLITQ_QSCHED |
 			    VIRTCHNL2_CAP_PROMISC |
@@ -891,6 +894,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
 }
 
 /**
+ * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg
+ * @adapter: Driver specific private struct
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter)
+{
+	struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree);
+	struct idpf_vc_xn_params xn_params = {
+		.vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS,
+		.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN,
+		.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC,
+	};
+	int num_regions, size;
+	struct idpf_hw *hw;
+	ssize_t reply_sz;
+	int err = 0;
+
+	rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+	if (!rcvd_regions)
+		return -ENOMEM;
+
+	xn_params.recv_buf.iov_base = rcvd_regions;
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+
+	num_regions = le16_to_cpu(rcvd_regions->num_memory_regions);
+	size = struct_size(rcvd_regions, mem_reg, num_regions);
+	if (reply_sz < size)
+		return -EIO;
+
+	if (size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	hw = &adapter->hw;
+	hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL);
+	if (!hw->lan_regs)
+		return -ENOMEM;
+
+	for (int i = 0; i < num_regions; i++) {
+		hw->lan_regs[i].addr_len =
+			le64_to_cpu(rcvd_regions->mem_reg[i].size);
+		hw->lan_regs[i].addr_start =
+			le64_to_cpu(rcvd_regions->mem_reg[i].start_offset);
+	}
+	hw->num_lan_regs = num_regions;
+
+	return err;
+}
+
+/**
+ * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat
+ * @adapter: Driver specific private structure
+ *
+ * Called when idpf_send_get_lan_memory_regions is not supported. This will
+ * calculate the offsets and sizes for the regions before, in between, and
+ * after the mailbox and rstat MMIO mappings.
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter)
+{
+	struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1];
+	struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0];
+	struct idpf_hw *hw = &adapter->hw;
+
+	hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING;
+	hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs),
+			       GFP_KERNEL);
+	if (!hw->lan_regs)
+		return -ENOMEM;
+
+	/* Region preceding mailbox */
+	hw->lan_regs[0].addr_start = 0;
+	hw->lan_regs[0].addr_len = mbx_reg->start;
+	/* Region between mailbox and rstat */
+	hw->lan_regs[1].addr_start = mbx_reg->end + 1;
+	hw->lan_regs[1].addr_len = rstat_reg->start -
+				   hw->lan_regs[1].addr_start;
+	/* Region after rstat */
+	hw->lan_regs[2].addr_start = rstat_reg->end + 1;
+	hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) -
+				   hw->lan_regs[2].addr_start;
+
+	return 0;
+}
+
+/**
+ * idpf_map_lan_mmio_regs - map remaining LAN BAR regions
+ * @adapter: Driver specific private structure
+ *
+ * Return: 0 on success or error code on failure.
+ */
+static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct idpf_hw *hw = &adapter->hw;
+	resource_size_t res_start;
+
+	res_start = pci_resource_start(pdev, 0);
+
+	for (int i = 0; i < hw->num_lan_regs; i++) {
+		resource_size_t start;
+		long len;
+
+		len = hw->lan_regs[i].addr_len;
+		if (!len)
+			continue;
+		start = hw->lan_regs[i].addr_start + res_start;
+
+		hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len);
+		if (!hw->lan_regs[i].vaddr) {
+			pci_err(pdev, "failed to allocate BAR0 region\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message
+ * @adapter: adapter info struct
+ * @rule: Flow steering rule to add/delete
+ * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add a filter, or
+ *	    VIRTCHNL2_OP_DEL_FLOW_RULE to delete one. All other values are
+ *	    invalid.
+ *
+ * Send ADD/DELETE flow steering virtchnl message and receive the result.
+ *
+ * Return: 0 on success, negative on failure.
+ */
+int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter,
+				struct virtchnl2_flow_rule_add_del *rule,
+				enum virtchnl2_op opcode)
+{
+	int rule_count = le32_to_cpu(rule->count);
+	struct idpf_vc_xn_params xn_params = {};
+	ssize_t reply_sz;
+
+	if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE &&
+	    opcode != VIRTCHNL2_OP_DEL_FLOW_RULE)
+		return -EINVAL;
+
+	xn_params.vc_op = opcode;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.async = false;
+	xn_params.send_buf.iov_base = rule;
+	xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count);
+	xn_params.recv_buf.iov_base = rule;
+	xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count);
+
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	return reply_sz < 0 ? reply_sz : 0;
+}
+
+/**
  * idpf_vport_alloc_max_qs - Allocate max queues for a vport
  * @adapter: Driver specific private structure
  * @max_q: vport max queue structure
@@ -2801,7 +2961,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
 	struct idpf_hw *hw = &adapter->hw;
 	int err;
 
-	adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+	adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info);
 
 	err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
 	if (err)
@@ -2961,6 +3121,30 @@ restart:
 		msleep(task_delay);
 	}
 
+	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) {
+		err = idpf_send_get_lan_memory_regions(adapter);
+		if (err) {
+			dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n",
+				err);
+			return -EINVAL;
+		}
+	} else {
+		/* Fallback to mapping the remaining regions of the entire BAR */
+		err = idpf_calc_remaining_mmio_regs(adapter);
+		if (err) {
+			dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n",
+				err);
+			return -ENOMEM;
+		}
+	}
+
+	err = idpf_map_lan_mmio_regs(adapter);
+	if (err) {
+		dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n",
+			err);
+		return -ENOMEM;
+	}
+
 	pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
 	num_max_vports = idpf_get_max_vports(adapter);
 	adapter->max_vports = num_max_vports;
@@ -3070,6 +3254,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
 
 	idpf_ptp_release(adapter);
 	idpf_deinit_task(adapter);
+	idpf_idc_deinit_core_aux_device(adapter->cdev_info);
 	idpf_intr_rel(adapter);
 
 	if (remove_in_prog)
@@ -3493,6 +3678,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
 }
 
 /**
+ * idpf_vport_is_cap_ena - Check if vport capability is enabled
+ * @vport: Private data struct
+ * @flag: flag(s) to check
+ *
+ * Return: true if the capability is supported, false otherwise
+ */
+bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag)
+{
+	struct virtchnl2_create_vport *vport_msg;
+
+	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+	return !!(le16_to_cpu(vport_msg->vport_flags) & flag);
+}
+
+/**
+ * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type
+ * @vport: Private data struct
+ * @flow_type: flow type to check (from ethtool.h)
+ *
+ * Return: true if sideband filters are allowed for @flow_type, false otherwise
+ */
+bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type)
+{
+	struct virtchnl2_create_vport *vport_msg;
+	__le64 caps;
+
+	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+	caps = vport_msg->sideband_flow_caps;
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP));
+	case UDP_V4_FLOW:
+		return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP));
+	default:
+		return false;
+	}
+}
+
+/**
+ * idpf_sideband_action_ena - Check if steering is enabled for action
+ * @vport: Private data struct
+ * @fsp: flow spec
+ *
+ * Return: true if sideband filters are allowed for @fsp, false otherwise
+ */
+bool idpf_sideband_action_ena(struct idpf_vport *vport,
+			      struct ethtool_rx_flow_spec *fsp)
+{
+	struct virtchnl2_create_vport *vport_msg;
+	unsigned int supp_actions;
+
+	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+	supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions);
+
+	/* Actions Drop/Wake are not supported */
+	if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+	    fsp->ring_cookie == RX_CLS_FLOW_WAKE)
+		return false;
+
+	return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE);
+}
+
+/**
+ * idpf_fsteer_max_rules - Get maximum number of flow steering rules
+ * @vport: Private data struct
+ *
+ * Return: maximum number of flow steering rules supported on this vport
+ */
+unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport)
+{
+	struct virtchnl2_create_vport *vport_msg;
+
+	vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+	return le32_to_cpu(vport_msg->flow_steer_max_rules);
+}
+
+/**
  * idpf_get_vport_id: Get vport id
  * @vport: virtual port structure
  *
@@ -3728,3 +3986,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
 
 	return reply_sz < 0 ? reply_sz : 0;
 }
+
+/**
+ * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers
+ * @cdev_info: IDC core device info pointer
+ * @send_msg: message to send
+ * @msg_size: size of message to send
+ * @recv_msg: message to populate on reception of response
+ * @recv_len: length of message copied into recv_msg or 0 on error
+ *
+ * Return: 0 on success or error code on failure.
+ */
+int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info,
+			       u8 *send_msg, u16 msg_size,
+			       u8 *recv_msg, u16 *recv_len)
+{
+	struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev);
+	struct idpf_vc_xn_params xn_params = { };
+	ssize_t reply_sz;
+	u16 recv_size;
+
+	if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN)
+		return -EINVAL;
+
+	recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN);
+	*recv_len = 0;
+	xn_params.vc_op = VIRTCHNL2_OP_RDMA;
+	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+	xn_params.send_buf.iov_base = send_msg;
+	xn_params.send_buf.iov_len = msg_size;
+	xn_params.recv_buf.iov_base = recv_msg;
+	xn_params.recv_buf.iov_len = recv_size;
+	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+	if (reply_sz < 0)
+		return reply_sz;
+	*recv_len = reply_sz;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync);
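
A minimal usage sketch for the newly exported idpf_idc_rdma_vc_send_sync(): the caller below is hypothetical (my_rdma_send_example() and the buffer sizes are invented for illustration, and an RDMA auxiliary driver would obtain cdev_info from idpf's IDC registration), but the in/out semantics of @recv_len follow the implementation above.

static int my_rdma_send_example(struct iidc_rdma_core_dev_info *cdev_info)
{
	u8 req[16] = {};		/* opaque RDMA virtchnl request */
	u8 resp[64];			/* buffer for the response */
	u16 resp_len = sizeof(resp);	/* in: capacity; out: bytes copied */
	int err;

	/* idpf caps the receive size at IDPF_CTLQ_MAX_BUF_LEN and zeroes
	 * resp_len before sending, so it reads 0 on any failure path.
	 */
	err = idpf_idc_rdma_vc_send_sync(cdev_info, req, sizeof(req),
					 resp, &resp_len);
	if (err)
		return err;

	/* resp[0..resp_len - 1] now holds the control plane's reply */
	return 0;
}

The payload of a VIRTCHNL2_OP_RDMA message is opaque to idpf; its format is owned by the RDMA driver and the control plane, which is why the callback only moves bytes and reports the reply length.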