Diffstat (limited to 'drivers/net/ethernet/intel/idpf')
21 files changed, 2039 insertions, 168 deletions
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 83ac5e296382..4ef4b2b5e37a 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -10,6 +10,7 @@ idpf-y := \ idpf_controlq_setup.o \ idpf_dev.o \ idpf_ethtool.o \ + idpf_idc.o \ idpf_lib.o \ idpf_main.o \ idpf_txrx.o \ diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 1e812c3f62f9..f4c0eaf9bde3 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -12,12 +12,16 @@ struct idpf_vport_max_q; #include <net/pkt_sched.h> #include <linux/aer.h> #include <linux/etherdevice.h> +#include <linux/ioport.h> #include <linux/pci.h> #include <linux/bitfield.h> #include <linux/sctp.h> #include <linux/ethtool_netlink.h> #include <net/gro.h> +#include <linux/net/intel/iidc_rdma.h> +#include <linux/net/intel/iidc_rdma_idpf.h> + #include "virtchnl2.h" #include "idpf_txrx.h" #include "idpf_controlq.h" @@ -194,7 +198,8 @@ struct idpf_vport_max_q { * @ptp_reg_init: PTP register initialization */ struct idpf_reg_ops { - void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); + void (*ctlq_reg_init)(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq); int (*intr_reg_init)(struct idpf_vport *vport); void (*mb_intr_reg_init)(struct idpf_adapter *adapter); void (*reset_reg_init)(struct idpf_adapter *adapter); @@ -203,12 +208,25 @@ struct idpf_reg_ops { void (*ptp_reg_init)(const struct idpf_adapter *adapter); }; +#define IDPF_MMIO_REG_NUM_STATIC 2 +#define IDPF_PF_MBX_REGION_SZ 4096 +#define IDPF_PF_RSTAT_REGION_SZ 2048 +#define IDPF_VF_MBX_REGION_SZ 10240 +#define IDPF_VF_RSTAT_REGION_SZ 2048 + /** * struct idpf_dev_ops - Device specific operations * @reg_ops: Register operations + * @idc_init: IDC initialization + * @static_reg_info: array of mailbox and rstat register info */ struct idpf_dev_ops { struct idpf_reg_ops reg_ops; + + int (*idc_init)(struct idpf_adapter *adapter); + + /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */ + struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC]; }; /** @@ -251,6 +269,12 @@ struct idpf_port_stats { struct virtchnl2_vport_stats vport_stats; }; +struct idpf_fsteer_fltr { + struct list_head list; + u32 loc; + u32 q_index; +}; + /** * struct idpf_vport - Handle for netdevices and queue resources * @num_txq: Number of allocated TX queues @@ -275,6 +299,7 @@ struct idpf_port_stats { * group will yield total number of RX queues. * @rxq_model: Splitq queue or single queue queuing model * @rx_ptype_lkup: Lookup table for ptypes on RX + * @vdev_info: IDC vport device info pointer * @adapter: back pointer to associated adapter * @netdev: Associated net_device. Each vport should have one and only one * associated netdev. @@ -320,6 +345,8 @@ struct idpf_vport { u32 rxq_model; struct libeth_rx_pt *rx_ptype_lkup; + struct iidc_rdma_vport_dev_info *vdev_info; + struct idpf_adapter *adapter; struct net_device *netdev; DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS); @@ -379,9 +406,27 @@ struct idpf_rss_data { }; /** + * struct idpf_q_coalesce - User defined coalescing configuration values for + * a single queue. + * @tx_intr_mode: Dynamic TX ITR or not + * @rx_intr_mode: Dynamic RX ITR or not + * @tx_coalesce_usecs: TX interrupt throttling rate + * @rx_coalesce_usecs: RX interrupt throttling rate + * + * Used to restore user coalescing configuration after a reset. 
+ */ +struct idpf_q_coalesce { + u32 tx_intr_mode; + u32 rx_intr_mode; + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; +}; + +/** * struct idpf_vport_user_config_data - User defined configuration values for * each vport. * @rss_data: See struct idpf_rss_data + * @q_coalesce: Array of per queue coalescing data * @num_req_tx_qs: Number of user requested TX queues through ethtool * @num_req_rx_qs: Number of user requested RX queues through ethtool * @num_req_txq_desc: Number of user requested TX queue descriptors through @@ -390,17 +435,22 @@ struct idpf_rss_data { * ethtool * @user_flags: User toggled config flags * @mac_filter_list: List of MAC filters + * @num_fsteer_fltrs: number of flow steering filters + * @flow_steer_list: list of flow steering filters * * Used to restore configuration after a reset as the vport will get wiped. */ struct idpf_vport_user_config_data { struct idpf_rss_data rss_data; + struct idpf_q_coalesce *q_coalesce; u16 num_req_tx_qs; u16 num_req_rx_qs; u32 num_req_txq_desc; u32 num_req_rxq_desc; DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); struct list_head mac_filter_list; + u32 num_fsteer_fltrs; + struct list_head flow_steer_list; }; /** @@ -507,10 +557,11 @@ struct idpf_vc_xn_manager; * @flags: See enum idpf_flags * @reset_reg: See struct idpf_reset_reg * @hw: Device access data - * @num_req_msix: Requested number of MSIX vectors * @num_avail_msix: Available number of MSIX vectors * @num_msix_entries: Number of entries in MSIX table * @msix_entries: MSIX table + * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA + * @rdma_msix_entries: RDMA MSIX table * @req_vec_chunks: Requested vector chunk data * @mb_vector: Mailbox vector data * @vector_stack: Stack to store the msix vector indexes @@ -539,6 +590,7 @@ struct idpf_vc_xn_manager; * @caps: Negotiated capabilities with device * @vcxn_mngr: Virtchnl transaction manager * @dev_ops: See idpf_dev_ops + * @cdev_info: IDC core device info pointer * @num_vfs: Number of allocated VFs through sysfs. 
PF does not directly talk * to VFs but is used to initialize them * @crc_enable: Enable CRC insertion offload @@ -561,10 +613,11 @@ struct idpf_adapter { DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS); struct idpf_reset_reg reset_reg; struct idpf_hw hw; - u16 num_req_msix; u16 num_avail_msix; u16 num_msix_entries; struct msix_entry *msix_entries; + u16 num_rdma_msix_entries; + struct msix_entry *rdma_msix_entries; struct virtchnl2_alloc_vectors *req_vec_chunks; struct idpf_q_vector mb_vector; struct idpf_vector_lifo vector_stack; @@ -597,6 +650,7 @@ struct idpf_adapter { struct idpf_vc_xn_manager *vcxn_mngr; struct idpf_dev_ops dev_ops; + struct iidc_rdma_core_dev_info *cdev_info; int num_vfs; bool crc_enable; bool req_tx_splitq; @@ -630,17 +684,28 @@ static inline int idpf_is_queue_model_split(u16 q_model) bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, enum idpf_cap_field field, u64 flag); +/** + * idpf_is_rdma_cap_ena - Determine if RDMA is supported + * @adapter: private data struct + * + * Return: true if RDMA capability is enabled, false otherwise + */ +static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) +{ + return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA); +} + #define IDPF_CAP_RSS (\ - VIRTCHNL2_CAP_RSS_IPV4_TCP |\ - VIRTCHNL2_CAP_RSS_IPV4_TCP |\ - VIRTCHNL2_CAP_RSS_IPV4_UDP |\ - VIRTCHNL2_CAP_RSS_IPV4_SCTP |\ - VIRTCHNL2_CAP_RSS_IPV4_OTHER |\ - VIRTCHNL2_CAP_RSS_IPV6_TCP |\ - VIRTCHNL2_CAP_RSS_IPV6_TCP |\ - VIRTCHNL2_CAP_RSS_IPV6_UDP |\ - VIRTCHNL2_CAP_RSS_IPV6_SCTP |\ - VIRTCHNL2_CAP_RSS_IPV6_OTHER) + VIRTCHNL2_FLOW_IPV4_TCP |\ + VIRTCHNL2_FLOW_IPV4_TCP |\ + VIRTCHNL2_FLOW_IPV4_UDP |\ + VIRTCHNL2_FLOW_IPV4_SCTP |\ + VIRTCHNL2_FLOW_IPV4_OTHER |\ + VIRTCHNL2_FLOW_IPV6_TCP |\ + VIRTCHNL2_FLOW_IPV6_TCP |\ + VIRTCHNL2_FLOW_IPV6_UDP |\ + VIRTCHNL2_FLOW_IPV6_SCTP |\ + VIRTCHNL2_FLOW_IPV6_OTHER) #define IDPF_CAP_RSC (\ VIRTCHNL2_CAP_RSC_IPV4_TCP |\ @@ -683,6 +748,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter) } /** + * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors + * @adapter: private data struct + * + * Return: number of vectors reserved for RDMA + */ +static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors); +} + +/** * idpf_get_default_vports - Get default number of vports * @adapter: private data struct */ @@ -721,6 +797,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) } /** + * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Return: BAR0 mailbox register address based on register offset. + */ +static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + return adapter->hw.mbx.vaddr + reg_offset; +} + +/** + * idpf_get_rstat_reg_addr - Get BAR0 rstat register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Return: BAR0 rstat register address based on register offset. 
+ */ +static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + reg_offset -= adapter->dev_ops.static_reg_info[1].start; + + return adapter->hw.rstat.vaddr + reg_offset; +} + +/** * idpf_get_reg_addr - Get BAR0 register address * @adapter: private data struct * @reg_offset: register offset value @@ -730,7 +834,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter, resource_size_t reg_offset) { - return (void __iomem *)(adapter->hw.hw_addr + reg_offset); + struct idpf_hw *hw = &adapter->hw; + + for (int i = 0; i < hw->num_lan_regs; i++) { + struct idpf_mmio_reg *region = &hw->lan_regs[i]; + + if (reg_offset >= region->addr_start && + reg_offset < (region->addr_start + region->addr_len)) { + /* Convert the offset so that it is relative to the + * start of the region. Then add the base address of + * the region to get the final address. + */ + reg_offset -= region->addr_start; + + return region->vaddr + reg_offset; + } + } + + /* It's impossible to hit this case with offsets from the CP. But if we + * do for any other reason, the kernel will panic on that register + * access. Might as well do it here to make it clear what's happening. + */ + BUG(); + + return NULL; } /** @@ -744,7 +871,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter) if (!adapter->hw.arq) return true; - return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) & + return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) & adapter->hw.arq->reg.len_mask); } @@ -853,5 +980,16 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); - +int idpf_idc_init(struct idpf_adapter *adapter); +int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, + enum iidc_function_type ftype); +void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info); +void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info); +void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info); +void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, + enum iidc_rdma_event_type event_type); + +int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter, + struct virtchnl2_flow_rule_add_del *rule, + enum virtchnl2_op opcode); #endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index b28991dd1870..67894eda2d29 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, { /* Update tail to post pre-allocated buffers for rx queues */ if (is_rxq) - wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); + idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); /* For non-Mailbox control queues only TAIL need to be set */ if (cq->q_id != -1) return; /* Clear Head for both send or receive */ - wr32(hw, cq->reg.head, 0); + idpf_mbx_wr32(hw, cq->reg.head, 0); /* set starting point */ - wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); - wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); - wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); + idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); + 
idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); + idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); } /** @@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) */ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) { - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* free ring buffers and the ring itself */ idpf_ctlq_dealloc_ring_res(hw, cq); @@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) /* Set ring_size to 0 to indicate uninitialized queue */ cq->ring_size = 0; - mutex_unlock(&cq->cq_lock); - mutex_destroy(&cq->cq_lock); + spin_unlock(&cq->cq_lock); } /** @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, idpf_ctlq_init_regs(hw, cq, is_rxq); - mutex_init(&cq->cq_lock); + spin_lock_init(&cq->cq_lock); list_add(&cq->cq_list, &hw->cq_list_head); @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, int err = 0; int i; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* Ensure there are enough descriptors to send all messages */ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); @@ -329,10 +328,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, */ dma_wmb(); - wr32(hw, cq->reg.tail, cq->next_to_use); + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use); err_unlock: - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); return err; } @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (*clean_count > cq->ring_size) return -EBADR; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* Return number of descriptors actually cleaned */ *clean_count = i; @@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, if (*buff_count > 0) buffs_avail = true; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); if (tbp >= cq->ring_size) tbp = 0; @@ -521,10 +520,10 @@ post_buffs_out: dma_wmb(); - wr32(hw, cq->reg.tail, cq->next_to_post); + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post); } - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* return the number of buffers that were not posted */ *buff_count = *buff_count - i; @@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, u16 i; /* take the lock before we start messing with the ring */ - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); *num_q_msg = i; if (*num_q_msg == 0) diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h index c1aba09e9856..de4ece40c2ff 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h @@ -94,12 +94,26 @@ struct idpf_mbxq_desc { u32 pf_vf_id; /* used by CP when sending to PF */ }; +/* Max number of MMIO regions not including the mailbox and rstat regions in + * the fallback case when the whole bar is mapped. 
+ */ +#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3 + +struct idpf_mmio_reg { + void __iomem *vaddr; + resource_size_t addr_start; + resource_size_t addr_len; +}; + /* Define the driver hardware struct to replace other control structs as needed * Align to ctlq_hw_info */ struct idpf_hw { - void __iomem *hw_addr; - resource_size_t hw_addr_len; + struct idpf_mmio_reg mbx; + struct idpf_mmio_reg rstat; + /* Array of remaining LAN BAR regions */ + int num_lan_regs; + struct idpf_mmio_reg *lan_regs; struct idpf_adapter *back; diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index 9642494a67d8..3414c5f9a831 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -99,7 +99,7 @@ struct idpf_ctlq_info { enum idpf_ctlq_type cq_type; int q_id; - struct mutex cq_lock; /* control queue lock */ + spinlock_t cq_lock; /* control queue lock */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 3fae81f1f988..bfa60f7d43de 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -10,10 +10,13 @@ /** * idpf_ctlq_reg_init - initialize default mailbox registers + * @adapter: adapter structure * @cq: pointer to the array of create control queues */ -static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +static void idpf_ctlq_reg_init(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq) { + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; int i; for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { @@ -22,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) switch (ccq->type) { case IDPF_CTLQ_TYPE_MAILBOX_TX: /* set head and tail registers in our local struct */ - ccq->reg.head = PF_FW_ATQH; - ccq->reg.tail = PF_FW_ATQT; - ccq->reg.len = PF_FW_ATQLEN; - ccq->reg.bah = PF_FW_ATQBAH; - ccq->reg.bal = PF_FW_ATQBAL; + ccq->reg.head = PF_FW_ATQH - mbx_start; + ccq->reg.tail = PF_FW_ATQT - mbx_start; + ccq->reg.len = PF_FW_ATQLEN - mbx_start; + ccq->reg.bah = PF_FW_ATQBAH - mbx_start; + ccq->reg.bal = PF_FW_ATQBAL - mbx_start; ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M; ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; ccq->reg.head_mask = PF_FW_ATQH_ATQH_M; break; case IDPF_CTLQ_TYPE_MAILBOX_RX: /* set head and tail registers in our local struct */ - ccq->reg.head = PF_FW_ARQH; - ccq->reg.tail = PF_FW_ARQT; - ccq->reg.len = PF_FW_ARQLEN; - ccq->reg.bah = PF_FW_ARQBAH; - ccq->reg.bal = PF_FW_ARQBAL; + ccq->reg.head = PF_FW_ARQH - mbx_start; + ccq->reg.tail = PF_FW_ARQT - mbx_start; + ccq->reg.len = PF_FW_ARQLEN - mbx_start; + ccq->reg.bah = PF_FW_ARQBAH - mbx_start; + ccq->reg.bal = PF_FW_ARQBAL - mbx_start; ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M; ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; ccq->reg.head_mask = PF_FW_ARQH_ARQH_M; @@ -130,7 +133,7 @@ free_reg_vals: */ static void idpf_reset_reg_init(struct idpf_adapter *adapter) { - adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT); + adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT); adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M; } @@ -144,9 +147,9 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter, { u32 reset_reg; - reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL)); + reset_reg = readl(idpf_get_rstat_reg_addr(adapter, 
PFGEN_CTRL)); writel(reset_reg | PFGEN_CTRL_PFSWR, - idpf_get_reg_addr(adapter, PFGEN_CTRL)); + idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL)); } /** @@ -162,6 +165,17 @@ static void idpf_ptp_reg_init(const struct idpf_adapter *adapter) } /** + * idpf_idc_register - register for IDC callbacks + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. + */ +static int idpf_idc_register(struct idpf_adapter *adapter) +{ + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF); +} + +/** * idpf_reg_ops_init - Initialize register API function pointers * @adapter: Driver specific private structure */ @@ -182,4 +196,11 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter) void idpf_dev_ops_init(struct idpf_adapter *adapter) { idpf_reg_ops_init(adapter); + + adapter->dev_ops.idc_init = idpf_idc_register; + + resource_set_range(&adapter->dev_ops.static_reg_info[0], + PF_FW_BASE, IDPF_PF_MBX_REGION_SZ); + resource_set_range(&adapter->dev_ops.static_reg_info[1], + PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 9bdb309b668e..0eb812ac19c2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_ptp.h" +#include "idpf_virtchnl.h" /** * idpf_get_rxnfc - command to get RX flow classification rules @@ -13,26 +14,312 @@ * Returns Success if the command is supported. */ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, - u32 __always_unused *rule_locs) + u32 *rule_locs) { + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_fsteer_fltr *f; struct idpf_vport *vport; + unsigned int cnt = 0; + int err = 0; idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = vport->num_rxq; - idpf_vport_ctrl_unlock(netdev); - - return 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = user_config->num_fsteer_fltrs; + cmd->data = idpf_fsteer_max_rules(vport); + break; + case ETHTOOL_GRXCLSRULE: + err = -EINVAL; + list_for_each_entry(f, &user_config->flow_steer_list, list) + if (f->loc == cmd->fs.location) { + cmd->fs.ring_cookie = f->q_index; + err = 0; + break; + } + break; + case ETHTOOL_GRXCLSRLALL: + cmd->data = idpf_fsteer_max_rules(vport); + list_for_each_entry(f, &user_config->flow_steer_list, list) { + if (cnt == cmd->rule_cnt) { + err = -EMSGSIZE; + break; + } + rule_locs[cnt] = f->loc; + cnt++; + } + if (!err) + cmd->rule_cnt = user_config->num_fsteer_fltrs; + break; default: break; } idpf_vport_ctrl_unlock(netdev); - return -EOPNOTSUPP; + return err; +} + +static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp) +{ + struct iphdr *iph; + + hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4); + + iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec; + iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src; + iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; + + iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask; + iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src; + iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst; +} + +static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp, + bool v4) +{ + struct udphdr *udph, *udpm; + + 
hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP); + + udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec; + udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask; + + if (v4) { + udph->source = fsp->h_u.udp_ip4_spec.psrc; + udph->dest = fsp->h_u.udp_ip4_spec.pdst; + udpm->source = fsp->m_u.udp_ip4_spec.psrc; + udpm->dest = fsp->m_u.udp_ip4_spec.pdst; + } else { + udph->source = fsp->h_u.udp_ip6_spec.psrc; + udph->dest = fsp->h_u.udp_ip6_spec.pdst; + udpm->source = fsp->m_u.udp_ip6_spec.psrc; + udpm->dest = fsp->m_u.udp_ip6_spec.pdst; + } +} + +static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp, + bool v4) +{ + struct tcphdr *tcph, *tcpm; + + hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP); + + tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec; + tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask; + + if (v4) { + tcph->source = fsp->h_u.tcp_ip4_spec.psrc; + tcph->dest = fsp->h_u.tcp_ip4_spec.pdst; + tcpm->source = fsp->m_u.tcp_ip4_spec.psrc; + tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst; + } else { + tcph->source = fsp->h_u.tcp_ip6_spec.psrc; + tcph->dest = fsp->h_u.tcp_ip6_spec.pdst; + tcpm->source = fsp->m_u.tcp_ip6_spec.psrc; + tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst; + } +} + +/** + * idpf_add_flow_steer - add a Flow Steering filter + * @netdev: network interface device structure + * @cmd: command to add Flow Steering filter + * + * Return: 0 on success and negative values for failure + */ +static int idpf_add_flow_steer(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct idpf_fsteer_fltr *fltr, *parent = NULL, *f; + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct virtchnl2_flow_rule_add_del *rule; + struct idpf_vport_config *vport_config; + struct virtchnl2_rule_action_set *acts; + struct virtchnl2_flow_rule_info *info; + struct virtchnl2_proto_hdrs *hdrs; + struct idpf_vport *vport; + u32 flow_type, q_index; + u16 num_rxq; + int err; + + vport = idpf_netdev_to_vport(netdev); + vport_config = vport->adapter->vport_config[np->vport_idx]; + user_config = &vport_config->user_config; + num_rxq = user_config->num_req_rx_qs; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + if (flow_type != fsp->flow_type) + return -EINVAL; + + if (!idpf_sideband_action_ena(vport, fsp) || + !idpf_sideband_flow_type_ena(vport, flow_type)) + return -EOPNOTSUPP; + + if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport)) + return -ENOSPC; + + q_index = fsp->ring_cookie; + if (q_index >= num_rxq) + return -EINVAL; + + rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->vport_id = cpu_to_le32(vport->vport_id); + rule->count = cpu_to_le32(1); + info = &rule->rule_info[0]; + info->rule_id = cpu_to_le32(fsp->location); + + hdrs = &info->rule_cfg.proto_hdrs; + hdrs->tunnel_level = 0; + hdrs->count = cpu_to_le32(2); + + acts = &info->rule_cfg.action_set; + acts->count = cpu_to_le32(1); + acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE); + acts->actions[0].act_conf.q_id = cpu_to_le32(q_index); + + switch (flow_type) { + case UDP_V4_FLOW: + idpf_fsteer_fill_ipv4(hdrs, fsp); + idpf_fsteer_fill_udp(hdrs, fsp, true); + break; + case TCP_V4_FLOW: + idpf_fsteer_fill_ipv4(hdrs, fsp); + idpf_fsteer_fill_tcp(hdrs, fsp, true); + break; + default: + err = -EINVAL; + goto out; + } + + err = 
idpf_add_del_fsteer_filters(vport->adapter, rule, + VIRTCHNL2_OP_ADD_FLOW_RULE); + if (err) + goto out; + + if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { + err = -EIO; + goto out; + } + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + err = -ENOMEM; + goto out; + } + + fltr->loc = fsp->location; + fltr->q_index = q_index; + list_for_each_entry(f, &user_config->flow_steer_list, list) { + if (f->loc >= fltr->loc) + break; + parent = f; + } + + parent ? list_add(&fltr->list, &parent->list) : + list_add(&fltr->list, &user_config->flow_steer_list); + + user_config->num_fsteer_fltrs++; + +out: + kfree(rule); + return err; +} + +/** + * idpf_del_flow_steer - delete a Flow Steering filter + * @netdev: network interface device structure + * @cmd: command to add Flow Steering filter + * + * Return: 0 on success and negative values for failure + */ +static int idpf_del_flow_steer(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct virtchnl2_flow_rule_add_del *rule; + struct idpf_vport_config *vport_config; + struct virtchnl2_flow_rule_info *info; + struct idpf_fsteer_fltr *f, *iter; + struct idpf_vport *vport; + int err; + + vport = idpf_netdev_to_vport(netdev); + vport_config = vport->adapter->vport_config[np->vport_idx]; + user_config = &vport_config->user_config; + + if (!idpf_sideband_action_ena(vport, fsp)) + return -EOPNOTSUPP; + + rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->vport_id = cpu_to_le32(vport->vport_id); + rule->count = cpu_to_le32(1); + info = &rule->rule_info[0]; + info->rule_id = cpu_to_le32(fsp->location); + + err = idpf_add_del_fsteer_filters(vport->adapter, rule, + VIRTCHNL2_OP_DEL_FLOW_RULE); + if (err) + goto out; + + if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { + err = -EIO; + goto out; + } + + list_for_each_entry_safe(f, iter, + &user_config->flow_steer_list, list) { + if (f->loc == fsp->location) { + list_del(&f->list); + kfree(f); + user_config->num_fsteer_fltrs--; + goto out; + } + } + err = -EINVAL; + +out: + kfree(rule); + return err; +} + +static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + idpf_vport_ctrl_lock(netdev); + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = idpf_add_flow_steer(netdev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = idpf_del_flow_steer(netdev, cmd); + break; + default: + break; + } + + idpf_vport_ctrl_unlock(netdev); + return ret; } /** @@ -47,7 +334,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; @@ -66,7 +353,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; @@ -1090,12 +1377,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * __idpf_set_q_coalesce - set ITR values for specific queue * @ec: ethtool structure from user to update ITR settings + * @q_coal: per queue coalesce settings * @qv: queue 
vector for which itr values has to be set * @is_rxq: is queue type rx * * Returns 0 on success, negative otherwise. */ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, + struct idpf_q_coalesce *q_coal, struct idpf_q_vector *qv, bool is_rxq) { u32 use_adaptive_coalesce, coalesce_usecs; @@ -1139,20 +1428,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, if (is_rxq) { qv->rx_itr_value = coalesce_usecs; + q_coal->rx_coalesce_usecs = coalesce_usecs; if (use_adaptive_coalesce) { qv->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC; } else { qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; - idpf_vport_intr_write_itr(qv, qv->rx_itr_value, - false); + q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, coalesce_usecs, false); } } else { qv->tx_itr_value = coalesce_usecs; + q_coal->tx_coalesce_usecs = coalesce_usecs; if (use_adaptive_coalesce) { qv->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC; } else { qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; - idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); + q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, coalesce_usecs, true); } } @@ -1165,6 +1459,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, /** * idpf_set_q_coalesce - set ITR values for specific queue * @vport: vport associated to the queue that need updating + * @q_coal: per queue coalesce settings * @ec: coalesce settings to program the device with * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index * @is_rxq: is queue type rx @@ -1172,6 +1467,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, * Return 0 on success, and negative on failure */ static int idpf_set_q_coalesce(const struct idpf_vport *vport, + struct idpf_q_coalesce *q_coal, const struct ethtool_coalesce *ec, int q_num, bool is_rxq) { @@ -1180,7 +1476,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport, qv = is_rxq ? 
idpf_find_rxq_vec(vport, q_num) : idpf_find_txq_vec(vport, q_num); - if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) + if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq)) return -EINVAL; return 0; @@ -1201,9 +1497,13 @@ static int idpf_set_coalesce(struct net_device *netdev, struct netlink_ext_ack *extack) { struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_q_coalesce *q_coal; struct idpf_vport *vport; int i, err = 0; + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); @@ -1211,13 +1511,15 @@ static int idpf_set_coalesce(struct net_device *netdev, goto unlock_mutex; for (i = 0; i < vport->num_txq; i++) { - err = idpf_set_q_coalesce(vport, ec, i, false); + q_coal = &user_config->q_coalesce[i]; + err = idpf_set_q_coalesce(vport, q_coal, ec, i, false); if (err) goto unlock_mutex; } for (i = 0; i < vport->num_rxq; i++) { - err = idpf_set_q_coalesce(vport, ec, i, true); + q_coal = &user_config->q_coalesce[i]; + err = idpf_set_q_coalesce(vport, q_coal, ec, i, true); if (err) goto unlock_mutex; } @@ -1239,20 +1541,25 @@ unlock_mutex: static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, struct ethtool_coalesce *ec) { + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_q_coalesce *q_coal; struct idpf_vport *vport; int err; idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + q_coal = &user_config->q_coalesce[q_num]; - err = idpf_set_q_coalesce(vport, ec, q_num, false); + err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false); if (err) { idpf_vport_ctrl_unlock(netdev); return err; } - err = idpf_set_q_coalesce(vport, ec, q_num, true); + err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true); idpf_vport_ctrl_unlock(netdev); @@ -1394,6 +1701,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_sset_count = idpf_get_sset_count, .get_channels = idpf_get_channels, .get_rxnfc = idpf_get_rxnfc, + .set_rxnfc = idpf_set_rxnfc, .get_rxfh_key_size = idpf_get_rxfh_key_size, .get_rxfh_indir_size = idpf_get_rxfh_indir_size, .get_rxfh = idpf_get_rxfh, diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c new file mode 100644 index 000000000000..4d2905103215 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2025 Intel Corporation */ + +#include <linux/export.h> + +#include "idpf.h" +#include "idpf_virtchnl.h" + +static DEFINE_IDA(idpf_idc_ida); + +#define IDPF_IDC_MAX_ADEV_NAME_LEN 15 + +/** + * idpf_idc_init - Called to initialize IDC + * @adapter: driver private data structure + * + * Return: 0 on success or cap not enabled, error code on failure. 
+ */ +int idpf_idc_init(struct idpf_adapter *adapter) +{ + int err; + + if (!idpf_is_rdma_cap_ena(adapter) || + !adapter->dev_ops.idc_init) + return 0; + + err = adapter->dev_ops.idc_init(adapter); + if (err) + dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n", + err); + + return err; +} + +/** + * idpf_vport_adev_release - function to be mapped to aux dev's release op + * @dev: pointer to device to free + */ +static void idpf_vport_adev_release(struct device *dev) +{ + struct iidc_rdma_vport_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev); + kfree(iadev); + iadev = NULL; +} + +/** + * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device + * @cdev_info: IDC core device info pointer + * @vdev_info: IDC vport device info pointer + * + * Return: 0 on success or error code on failure. + */ +static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info, + struct iidc_rdma_vport_dev_info *vdev_info) +{ + struct iidc_rdma_vport_auxiliary_dev *iadev; + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + vdev_info->adev = &iadev->adev; + iadev->vdev_info = vdev_info; + + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); + if (ret < 0) { + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); + goto err_ida_alloc; + } + adev->id = ret; + adev->dev.release = idpf_vport_adev_release; + adev->dev.parent = &cdev_info->pdev->dev; + sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor); + adev->name = name; + + ret = auxiliary_device_init(adev); + if (ret) + goto err_aux_dev_init; + + ret = auxiliary_device_add(adev); + if (ret) + goto err_aux_dev_add; + + return 0; + +err_aux_dev_add: + auxiliary_device_uninit(adev); +err_aux_dev_init: + ida_free(&idpf_idc_ida, adev->id); +err_ida_alloc: + vdev_info->adev = NULL; + kfree(iadev); + + return ret; +} + +/** + * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s) + * @vport: virtual port data struct + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct iidc_rdma_vport_dev_info *vdev_info; + struct iidc_rdma_core_dev_info *cdev_info; + struct virtchnl2_create_vport *vport_msg; + int err; + + vport_msg = (struct virtchnl2_create_vport *) + adapter->vport_params_recvd[vport->idx]; + + if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA)) + return 0; + + vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL); + if (!vport->vdev_info) + return -ENOMEM; + + cdev_info = vport->adapter->cdev_info; + + vdev_info = vport->vdev_info; + vdev_info->vport_id = vport->vport_id; + vdev_info->netdev = vport->netdev; + vdev_info->core_adev = cdev_info->adev; + + err = idpf_plug_vport_aux_dev(cdev_info, vdev_info); + if (err) { + vport->vdev_info = NULL; + kfree(vdev_info); + return err; + } + + return 0; +} + +/** + * idpf_idc_vdev_mtu_event - Function to handle IDC vport mtu change events + * @vdev_info: IDC vport device info pointer + * @event_type: type of event to pass to handler + */ +void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, + enum iidc_rdma_event_type event_type) +{ + struct iidc_rdma_vport_auxiliary_drv *iadrv; + struct iidc_rdma_event event = { }; + struct auxiliary_device *adev; + + if (!vdev_info) + /* RDMA is not enabled */ + return; + + set_bit(event_type, event.type); + + device_lock(&vdev_info->adev->dev); + adev = vdev_info->adev; + if (!adev || !adev->dev.driver) + goto unlock; + iadrv = container_of(adev->dev.driver, + struct iidc_rdma_vport_auxiliary_drv, + adrv.driver); + if (iadrv->event_handler) + iadrv->event_handler(vdev_info, &event); +unlock: + device_unlock(&vdev_info->adev->dev); +} + +/** + * idpf_core_adev_release - function to be mapped to aux dev's release op + * @dev: pointer to device to free + */ +static void idpf_core_adev_release(struct device *dev) +{ + struct iidc_rdma_core_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev); + kfree(iadev); + iadev = NULL; +} + +/** + * idpf_plug_core_aux_dev - allocate and register an Auxiliary device + * @cdev_info: IDC core device info pointer + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info) +{ + struct iidc_rdma_core_auxiliary_dev *iadev; + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + cdev_info->adev = adev; + iadev->cdev_info = cdev_info; + + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); + if (ret < 0) { + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); + goto err_ida_alloc; + } + adev->id = ret; + adev->dev.release = idpf_core_adev_release; + adev->dev.parent = &cdev_info->pdev->dev; + sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor); + adev->name = name; + + ret = auxiliary_device_init(adev); + if (ret) + goto err_aux_dev_init; + + ret = auxiliary_device_add(adev); + if (ret) + goto err_aux_dev_add; + + return 0; + +err_aux_dev_add: + auxiliary_device_uninit(adev); +err_aux_dev_init: + ida_free(&idpf_idc_ida, adev->id); +err_ida_alloc: + cdev_info->adev = NULL; + kfree(iadev); + + return ret; +} + +/** + * idpf_unplug_aux_dev - unregister and free an Auxiliary device + * @adev: auxiliary device struct + */ +static void idpf_unplug_aux_dev(struct auxiliary_device *adev) +{ + if (!adev) + return; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); + + ida_free(&idpf_idc_ida, adev->id); +} + +/** + * idpf_idc_issue_reset_event - Function to handle reset IDC event + * @cdev_info: IDC core device info pointer + */ +void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info) +{ + enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET; + struct iidc_rdma_core_auxiliary_drv *iadrv; + struct iidc_rdma_event event = { }; + struct auxiliary_device *adev; + + if (!cdev_info) + /* RDMA is not enabled */ + return; + + set_bit(event_type, event.type); + + device_lock(&cdev_info->adev->dev); + + adev = cdev_info->adev; + if (!adev || !adev->dev.driver) + goto unlock; + + iadrv = container_of(adev->dev.driver, + struct iidc_rdma_core_auxiliary_drv, + adrv.driver); + if (iadrv->event_handler) + iadrv->event_handler(cdev_info, &event); +unlock: + device_unlock(&cdev_info->adev->dev); +} + +/** + * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs + * @adapter: private data struct + * + * Return: 0 on success or error code on failure. + */ +static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_alloc_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (!vport) + continue; + + if (!vport->vdev_info) + err = idpf_idc_init_aux_vport_dev(vport); + else + err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info, + vport->vdev_info); + } + + return err; +} + +/** + * idpf_idc_vport_dev_down - called CORE is leaving vport aux dev support state + * @adapter: private data struct + */ +static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_alloc_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (!vport) + continue; + + idpf_unplug_aux_dev(vport->vdev_info->adev); + vport->vdev_info->adev = NULL; + } +} + +/** + * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver + * @cdev_info: IDC core device info pointer + * @up: RDMA core driver status + * + * This callback function is accessed by an Auxiliary Driver to indicate + * whether core driver is ready to support vport driver load or if vport + * drivers need to be taken down. 
+ * + * Return: 0 on success or error code on failure. + */ +int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + + if (up) + return idpf_idc_vport_dev_up(adapter); + + idpf_idc_vport_dev_down(adapter); + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl); + +/** + * idpf_idc_request_reset - Called by an Auxiliary Driver + * @cdev_info: IDC core device info pointer + * @reset_type: function, core or other + * + * This callback function is accessed by an Auxiliary Driver to request a reset + * on the Auxiliary Device. + * + * Return: 0 on success or error code on failure. + */ +int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info, + enum iidc_rdma_reset_type __always_unused reset_type) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + + if (!idpf_is_reset_in_prog(adapter)) { + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_request_reset); + +/** + * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure + * @adapter: driver private data structure + */ +static void +idpf_idc_init_msix_data(struct idpf_adapter *adapter) +{ + struct iidc_rdma_core_dev_info *cdev_info; + struct iidc_rdma_priv_dev_info *privd; + + if (!adapter->rdma_msix_entries) + return; + + cdev_info = adapter->cdev_info; + privd = cdev_info->iidc_priv; + + privd->msix_entries = adapter->rdma_msix_entries; + privd->msix_count = adapter->num_rdma_msix_entries; +} + +/** + * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s) + * @adapter: driver private data structure + * @ftype: PF or VF + * + * Return: 0 on success or error code on failure. 
+ */ +int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, + enum iidc_function_type ftype) +{ + struct iidc_rdma_core_dev_info *cdev_info; + struct iidc_rdma_priv_dev_info *privd; + int err, i; + + adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL); + if (!adapter->cdev_info) + return -ENOMEM; + cdev_info = adapter->cdev_info; + + privd = kzalloc(sizeof(*privd), GFP_KERNEL); + if (!privd) { + err = -ENOMEM; + goto err_privd_alloc; + } + + cdev_info->iidc_priv = privd; + cdev_info->pdev = adapter->pdev; + cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2; + privd->ftype = ftype; + + privd->mapped_mem_regions = + kcalloc(adapter->hw.num_lan_regs, + sizeof(struct iidc_rdma_lan_mapped_mem_region), + GFP_KERNEL); + if (!privd->mapped_mem_regions) { + err = -ENOMEM; + goto err_plug_aux_dev; + } + + privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs); + for (i = 0; i < adapter->hw.num_lan_regs; i++) { + privd->mapped_mem_regions[i].region_addr = + adapter->hw.lan_regs[i].vaddr; + privd->mapped_mem_regions[i].size = + cpu_to_le64(adapter->hw.lan_regs[i].addr_len); + privd->mapped_mem_regions[i].start_offset = + cpu_to_le64(adapter->hw.lan_regs[i].addr_start); + } + + idpf_idc_init_msix_data(adapter); + + err = idpf_plug_core_aux_dev(cdev_info); + if (err) + goto err_free_mem_regions; + + return 0; + +err_free_mem_regions: + kfree(privd->mapped_mem_regions); + privd->mapped_mem_regions = NULL; +err_plug_aux_dev: + kfree(privd); +err_privd_alloc: + kfree(cdev_info); + adapter->cdev_info = NULL; + + return err; +} + +/** + * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s) + * @cdev_info: IDC core device info pointer + */ +void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info) +{ + struct iidc_rdma_priv_dev_info *privd; + + if (!cdev_info) + return; + + idpf_unplug_aux_dev(cdev_info->adev); + + privd = cdev_info->iidc_priv; + kfree(privd->mapped_mem_regions); + kfree(privd); + kfree(cdev_info); +} + +/** + * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s) + * @vdev_info: IDC vport device info pointer + */ +void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info) +{ + if (!vdev_info) + return; + + idpf_unplug_aux_dev(vdev_info->adev); + + kfree(vdev_info); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 4eb20ec2accb..2c2a3e85d693 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -88,6 +88,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter) idpf_deinit_vector_stack(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; + kfree(adapter->rdma_msix_entries); + adapter->rdma_msix_entries = NULL; } /** @@ -299,13 +301,33 @@ rel_lock: */ int idpf_intr_req(struct idpf_adapter *adapter) { + u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0; u16 default_vports = idpf_get_default_vports(adapter); int num_q_vecs, total_vecs, num_vec_ids; - int min_vectors, v_actual, err; + int min_vectors, actual_vecs, err; unsigned int vector; u16 *vecids; + int i; total_vecs = idpf_get_reserved_vecs(adapter); + num_lan_vecs = total_vecs; + if (idpf_is_rdma_cap_ena(adapter)) { + num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter); + min_rdma_vecs = IDPF_MIN_RDMA_VEC; + + if (!num_rdma_vecs) { + /* If idpf_get_reserved_rdma_vecs is 0, vectors are + * pulled from the LAN pool. 
+ */ + num_rdma_vecs = min_rdma_vecs; + } else if (num_rdma_vecs < min_rdma_vecs) { + dev_err(&adapter->pdev->dev, + "Not enough vectors reserved for RDMA (min: %u, current: %u)\n", + min_rdma_vecs, num_rdma_vecs); + return -EINVAL; + } + } + num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); @@ -316,52 +338,76 @@ int idpf_intr_req(struct idpf_adapter *adapter) return -EAGAIN; } - min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; - v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, - total_vecs, PCI_IRQ_MSIX); - if (v_actual < min_vectors) { - dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", - v_actual); - err = -EAGAIN; + min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; + min_vectors = min_lan_vecs + min_rdma_vecs; + actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors, + total_vecs, PCI_IRQ_MSIX); + if (actual_vecs < 0) { + dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n", + min_vectors); + err = actual_vecs; goto send_dealloc_vecs; } - adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), - GFP_KERNEL); + if (idpf_is_rdma_cap_ena(adapter)) { + if (actual_vecs < total_vecs) { + dev_warn(&adapter->pdev->dev, + "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n", + total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC); + num_rdma_vecs = IDPF_MIN_RDMA_VEC; + } + adapter->rdma_msix_entries = kcalloc(num_rdma_vecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->rdma_msix_entries) { + err = -ENOMEM; + goto free_irq; + } + } + + num_lan_vecs = actual_vecs - num_rdma_vecs; + adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry), + GFP_KERNEL); if (!adapter->msix_entries) { err = -ENOMEM; - goto free_irq; + goto free_rdma_msix; } adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); - vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL); if (!vecids) { err = -ENOMEM; goto free_msix; } - num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, + num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs, &adapter->req_vec_chunks->vchunks); - if (num_vec_ids < v_actual) { + if (num_vec_ids < actual_vecs) { err = -EINVAL; goto free_vecids; } - for (vector = 0; vector < v_actual; vector++) { + for (vector = 0; vector < num_lan_vecs; vector++) { adapter->msix_entries[vector].entry = vecids[vector]; adapter->msix_entries[vector].vector = pci_irq_vector(adapter->pdev, vector); } + for (i = 0; i < num_rdma_vecs; vector++, i++) { + adapter->rdma_msix_entries[i].entry = vecids[vector]; + adapter->rdma_msix_entries[i].vector = + pci_irq_vector(adapter->pdev, vector); + } - adapter->num_req_msix = total_vecs; - adapter->num_msix_entries = v_actual; /* 'num_avail_msix' is used to distribute excess vectors to the vports * after considering the minimum vectors required per each default * vport */ - adapter->num_avail_msix = v_actual - min_vectors; + adapter->num_avail_msix = num_lan_vecs - min_lan_vecs; + adapter->num_msix_entries = num_lan_vecs; + if (idpf_is_rdma_cap_ena(adapter)) + adapter->num_rdma_msix_entries = num_rdma_vecs; /* Fill MSIX vector lifo stack with vector indexes */ err = idpf_init_vector_stack(adapter); @@ -383,6 +429,9 @@ free_vecids: free_msix: kfree(adapter->msix_entries); adapter->msix_entries = NULL; +free_rdma_msix: + kfree(adapter->rdma_msix_entries); + 
adapter->rdma_msix_entries = NULL; free_irq: pci_free_irq_vectors(adapter->pdev); send_dealloc_vecs: @@ -755,6 +804,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) dflt_features |= NETIF_F_RXHASH; + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_FLOW_STEER) && + idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER)) + dflt_features |= NETIF_F_NTUPLE; if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) csum_offloads |= NETIF_F_IP_CSUM; if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) @@ -972,6 +1025,8 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) struct idpf_adapter *adapter = vport->adapter; unsigned int i = vport->idx; + idpf_idc_deinit_vport_aux_device(vport->vdev_info); + idpf_deinit_mac_addr(vport); idpf_vport_stop(vport); @@ -1079,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, if (!vport) return vport; + num_max_q = max(max_q->max_txq, max_q->max_rxq); if (!adapter->vport_config[idx]) { struct idpf_vport_config *vport_config; + struct idpf_q_coalesce *q_coal; vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); if (!vport_config) { @@ -1089,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, return NULL; } + q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL); + if (!q_coal) { + kfree(vport_config); + kfree(vport); + + return NULL; + } + for (int i = 0; i < num_max_q; i++) { + q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF; + q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF; + } + vport_config->user_config.q_coalesce = q_coal; + adapter->vport_config[idx] = vport_config; } @@ -1098,7 +1170,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, vport->default_vport = adapter->num_alloc_vports < idpf_get_default_vports(adapter); - num_max_q = max(max_q->max_txq, max_q->max_rxq); vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); if (!vport->q_vector_idxs) goto free_vport; @@ -1481,6 +1552,7 @@ void idpf_init_task(struct work_struct *work) spin_lock_init(&vport_config->mac_filter_list_lock); INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); + INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list); err = idpf_check_supported_desc_ids(vport); if (err) { @@ -1738,6 +1810,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { bool is_reset = idpf_is_reset_detected(adapter); + idpf_idc_issue_reset_event(adapter->cdev_info); + idpf_set_vport_state(adapter); idpf_vc_core_deinit(adapter); if (!is_reset) @@ -1785,6 +1859,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) unlock_mutex: mutex_unlock(&adapter->vport_ctrl_lock); + /* Wait until all vports are created to init RDMA CORE AUX */ + if (!err) + err = idpf_idc_init(adapter); + return err; } @@ -1868,6 +1946,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, idpf_vport_calc_num_q_desc(new_vport); break; case IDPF_SR_MTU_CHANGE: + idpf_idc_vdev_mtu_event(vport->vdev_info, + IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE); + break; case IDPF_SR_RSC_CHANGE: break; default: @@ -1912,9 +1993,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, if (current_state == __IDPF_VPORT_UP) err = idpf_vport_open(vport); - kfree(new_vport); - - return err; + goto free_vport; 
err_reset: idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, @@ -1927,6 +2006,10 @@ err_open: free_vport: kfree(new_vport); + if (reset_cause == IDPF_SR_MTU_CHANGE) + idpf_idc_vdev_mtu_event(vport->vdev_info, + IIDC_RDMA_EVENT_AFTER_MTU_CHANGE); + return err; } @@ -2314,8 +2397,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) struct idpf_adapter *adapter = hw->back; size_t sz = ALIGN(size, 4096); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, - &mem->pa, GFP_KERNEL); + /* The control queue resources are freed under a spinlock, contiguous + * pages will avoid IOMMU remapping and the use vmap (and vunmap in + * dma_free_*() path. + */ + mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = sz; return mem->va; @@ -2330,8 +2417,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) { struct idpf_adapter *adapter = hw->back; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, mem->pa); + dma_free_attrs(&adapter->pdev->dev, mem->size, + mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = 0; mem->va = NULL; mem->pa = 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index 0efd9c0c7a90..dfe9126f1f4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -62,6 +62,7 @@ destroy_wqs: destroy_workqueue(adapter->vc_event_wq); for (i = 0; i < adapter->max_vports; i++) { + kfree(adapter->vport_config[i]->user_config.q_coalesce); kfree(adapter->vport_config[i]); adapter->vport_config[i] = NULL; } @@ -106,15 +107,37 @@ static void idpf_shutdown(struct pci_dev *pdev) */ static int idpf_cfg_hw(struct idpf_adapter *adapter) { + resource_size_t res_start, mbx_start, rstat_start; struct pci_dev *pdev = adapter->pdev; struct idpf_hw *hw = &adapter->hw; + struct device *dev = &pdev->dev; + long len; + + res_start = pci_resource_start(pdev, 0); + + /* Map mailbox space for virtchnl communication */ + mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start; + len = resource_size(&adapter->dev_ops.static_reg_info[0]); + hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len); + if (!hw->mbx.vaddr) { + pci_err(pdev, "failed to allocate BAR0 mbx region\n"); + + return -ENOMEM; + } + hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start; + hw->mbx.addr_len = len; - hw->hw_addr = pcim_iomap_table(pdev)[0]; - if (!hw->hw_addr) { - pci_err(pdev, "failed to allocate PCI iomap table\n"); + /* Map rstat space for resets */ + rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start; + len = resource_size(&adapter->dev_ops.static_reg_info[1]); + hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len); + if (!hw->rstat.vaddr) { + pci_err(pdev, "failed to allocate BAR0 rstat region\n"); return -ENOMEM; } + hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start; + hw->rstat.addr_len = len; hw->back = adapter; @@ -161,9 +184,9 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_free; - err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + err = pcim_request_region(pdev, 0, pci_name(pdev)); if (err) { - pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err)); + pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err)); goto err_free; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h index b21a04fccf0f..2aaabdc02dd2 
100644 --- a/drivers/net/ethernet/intel/idpf/idpf_mem.h +++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h @@ -12,9 +12,9 @@ struct idpf_dma_mem { size_t size; }; -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) -#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg))) +#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg)) +#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg))) +#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg)) #endif /* _IDPF_MEM_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c index 4f8725c85332..ee21f2ff0cad 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c @@ -42,6 +42,13 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter) direct, mailbox); + /* Get the cross timestamp */ + direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB; + ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + /* Set the device clock time */ direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; @@ -171,6 +178,127 @@ static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk, return 0; } +#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86) +/** + * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values + * directly + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value + * @sys_time: 64bit system time value + */ +static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi; + struct idpf_ptp *ptp = adapter->ptp; + + spin_lock(&ptp->read_dev_clk_lock); + + idpf_ptp_enable_shtime(adapter); + + dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l); + sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h); + + spin_unlock(&ptp->read_dev_clk_lock); + + *dev_time = (u64)dev_time_hi << 32 | dev_time_lo; + *sys_time = (u64)sys_time_hi << 32 | sys_time_lo; +} + +/** + * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values + * through mailbox + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value expressed in nanoseconds + * @sys_time: 64bit system time value expressed in nanoseconds + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + struct idpf_ptp_dev_timers cross_time; + int err; + + err = idpf_ptp_get_cross_time(adapter, &cross_time); + if (err) + return err; + + *dev_time = cross_time.dev_clk_time_ns; + *sys_time = cross_time.sys_time_ns; + + return err; +} + +/** + * idpf_ptp_get_sync_device_time - Get the cross time stamp info + * @device: Current device time + * @system: System counter value read synchronously with device time + * @ctx: Context provided by timekeeping code + * + * The device and the system clocks time read simultaneously. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int idpf_ptp_get_sync_device_time(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct idpf_adapter *adapter = ctx; + u64 ns_time_dev, ns_time_sys; + int err; + + switch (adapter->ptp->get_cross_tstamp_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_DIRECT: + idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev, + &ns_time_sys); + break; + case IDPF_PTP_MAILBOX: + err = idpf_ptp_get_sync_device_time_mailbox(adapter, + &ns_time_dev, + &ns_time_sys); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + *device = ns_to_ktime(ns_time_dev); + + system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART + : CSID_ARM_ARCH_COUNTER; + system->cycles = ns_time_sys; + system->use_nsecs = true; + + return 0; +} + +/** + * idpf_ptp_get_crosststamp - Capture a device cross timestamp + * @info: the driver's PTP info structure + * @cts: The memory to fill the cross timestamp info + * + * Capture a cross timestamp between the system time and the device PTP hardware + * clock. + * + * Return: cross timestamp value on success, -errno on failure. + */ +static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + + return get_device_system_crosststamp(idpf_ptp_get_sync_device_time, + adapter, NULL, cts); +} +#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */ + /** * idpf_ptp_gettimex64 - Get the time of the clock * @info: the driver's PTP info structure @@ -661,6 +789,14 @@ static void idpf_ptp_set_caps(const struct idpf_adapter *adapter) info->verify = idpf_ptp_verify_pin; info->enable = idpf_ptp_gpio_enable; info->do_aux_work = idpf_ptp_do_aux_work; +#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) + info->getcrosststamp = idpf_ptp_get_crosststamp; +#elif IS_ENABLED(CONFIG_X86) + if (pcie_ptm_enabled(adapter->pdev) && + boot_cpu_has(X86_FEATURE_ART) && + boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) + info->getcrosststamp = idpf_ptp_get_crosststamp; +#endif /* CONFIG_ARM_ARCH_TIMER */ } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h index a876749d6116..785da03e4cf5 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ptp.h +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h @@ -21,6 +21,8 @@ struct idpf_ptp_cmd { * @dev_clk_ns_h: high part of the device clock register * @phy_clk_ns_l: low part of the PHY clock register * @phy_clk_ns_h: high part of the PHY clock register + * @sys_time_ns_l: low part of the system time register + * @sys_time_ns_h: high part of the system time register * @incval_l: low part of the increment value register * @incval_h: high part of the increment value register * @shadj_l: low part of the shadow adjust register @@ -42,6 +44,10 @@ struct idpf_ptp_dev_clk_regs { void __iomem *phy_clk_ns_l; void __iomem *phy_clk_ns_h; + /* System time */ + void __iomem *sys_time_ns_l; + void __iomem *sys_time_ns_h; + /* Main timer adjustments */ void __iomem *incval_l; void __iomem *incval_h; @@ -162,6 +168,7 @@ struct idpf_ptp_vport_tx_tstamp_caps { * @dev_clk_regs: the set of registers to access the device clock * @caps: PTP capabilities negotiated with the Control Plane * @get_dev_clk_time_access: access type for getting the device clock time + * @get_cross_tstamp_access: access type for the cross timestamping * @set_dev_clk_time_access: access type for setting the device clock time * @adj_dev_clk_time_access: access type for the adjusting the device clock * 
@tx_tstamp_access: access type for the Tx timestamp value read @@ -182,6 +189,7 @@ struct idpf_ptp { struct idpf_ptp_dev_clk_regs dev_clk_regs; u32 caps; enum idpf_ptp_access get_dev_clk_time_access:2; + enum idpf_ptp_access get_cross_tstamp_access:2; enum idpf_ptp_access set_dev_clk_time_access:2; enum idpf_ptp_access adj_dev_clk_time_access:2; enum idpf_ptp_access tx_tstamp_access:2; @@ -264,6 +272,8 @@ void idpf_ptp_get_features_access(const struct idpf_adapter *adapter); bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq); int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, struct idpf_ptp_dev_timers *dev_clk_time); +int idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time); int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time); int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval); int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta); @@ -305,6 +315,13 @@ idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, return -EOPNOTSUPP; } +static inline int +idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time) +{ + return -EOPNOTSUPP; +} + static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time) { diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 993c354aa27a..555879b1248d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1006,7 +1006,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 5cf440e09d0a..66a1b040639d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -383,12 +383,12 @@ err_out: */ static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { - if (unlikely(!rx_buf->page)) + if (unlikely(!rx_buf->netmem)) return; - page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); + libeth_rx_recycle_slow(rx_buf->netmem); - rx_buf->page = NULL; + rx_buf->netmem = 0; rx_buf->offset = 0; } @@ -3240,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { - u32 hr = rx_buf->page->pp->p.offset; + u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->offset + hr, size, rx_buf->truesize); + skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem, + rx_buf->offset + hr, size, rx_buf->truesize); } /** @@ -3266,16 +3266,22 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, struct libeth_fqe *buf, u32 data_len) { u32 copy = data_len <= L1_CACHE_BYTES ? 
data_len : ETH_HLEN; + struct page *hdr_page, *buf_page; const void *src; void *dst; - if (!libeth_rx_sync_for_cpu(buf, copy)) + if (unlikely(netmem_is_net_iov(buf->netmem)) || + !libeth_rx_sync_for_cpu(buf, copy)) return 0; - dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; - src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; - memcpy(dst, src, LARGEST_ALIGN(copy)); + hdr_page = __netmem_to_page(hdr->netmem); + buf_page = __netmem_to_page(buf->netmem); + dst = page_address(hdr_page) + hdr->offset + + pp_page_to_nmdesc(hdr_page)->pp->p.offset; + src = page_address(buf_page) + buf->offset + + pp_page_to_nmdesc(buf_page)->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); buf->offset += copy; return copy; @@ -3291,11 +3297,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, */ struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { - u32 hr = buf->page->pp->p.offset; + struct page *buf_page = __netmem_to_page(buf->netmem); + u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset; struct sk_buff *skb; void *va; - va = page_address(buf->page) + buf->offset; + va = page_address(buf_page) + buf->offset; prefetch(va + hr); skb = napi_build_skb(va, buf->truesize); @@ -3429,7 +3436,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) if (unlikely(!hdr_len && !skb)) { hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); - pkt_len -= hdr_len; + /* If failed, drop both buffers by setting len to 0 */ + pkt_len -= hdr_len ? : pkt_len; u64_stats_update_begin(&rxq->stats_sync); u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); @@ -3446,7 +3454,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) u64_stats_update_end(&rxq->stats_sync); } - hdr->page = NULL; + hdr->netmem = 0; payload: if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) @@ -3462,7 +3470,7 @@ payload: break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); @@ -4349,9 +4357,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) int idpf_vport_intr_alloc(struct idpf_vport *vport) { u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; + struct idpf_vport_user_config_data *user_config; struct idpf_q_vector *q_vector; + struct idpf_q_coalesce *q_coal; u32 complqs_per_vector, v_idx; + u16 idx = vport->idx; + user_config = &vport->adapter->vport_config[idx]->user_config; vport->q_vectors = kcalloc(vport->num_q_vectors, sizeof(struct idpf_q_vector), GFP_KERNEL); if (!vport->q_vectors) @@ -4369,14 +4381,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; + q_coal = &user_config->q_coalesce[v_idx]; q_vector->vport = vport; - q_vector->tx_itr_value = IDPF_ITR_TX_DEF; - q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->tx_itr_value = q_coal->tx_coalesce_usecs; + q_vector->tx_intr_mode = q_coal->tx_intr_mode; q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; - q_vector->rx_itr_value = IDPF_ITR_RX_DEF; - q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->rx_itr_value = q_coal->rx_coalesce_usecs; + q_vector->rx_intr_mode = q_coal->rx_intr_mode; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 36a0f828a6f8..281de655a813 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ 
b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -57,6 +57,7 @@ /* Default vector sharing */ #define IDPF_MBX_Q_VEC 1 #define IDPF_MIN_Q_VEC 1 +#define IDPF_MIN_RDMA_VEC 2 #define IDPF_DFLT_TX_Q_DESC_COUNT 512 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512 diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index aba828abcb17..259d50fded67 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -9,10 +9,13 @@ /** * idpf_vf_ctlq_reg_init - initialize default mailbox registers + * @adapter: adapter structure * @cq: pointer to the array of create control queues */ -static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq) { + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; int i; for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { @@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) switch (ccq->type) { case IDPF_CTLQ_TYPE_MAILBOX_TX: /* set head and tail registers in our local struct */ - ccq->reg.head = VF_ATQH; - ccq->reg.tail = VF_ATQT; - ccq->reg.len = VF_ATQLEN; - ccq->reg.bah = VF_ATQBAH; - ccq->reg.bal = VF_ATQBAL; + ccq->reg.head = VF_ATQH - mbx_start; + ccq->reg.tail = VF_ATQT - mbx_start; + ccq->reg.len = VF_ATQLEN - mbx_start; + ccq->reg.bah = VF_ATQBAH - mbx_start; + ccq->reg.bal = VF_ATQBAL - mbx_start; ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M; ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M; ccq->reg.head_mask = VF_ATQH_ATQH_M; break; case IDPF_CTLQ_TYPE_MAILBOX_RX: /* set head and tail registers in our local struct */ - ccq->reg.head = VF_ARQH; - ccq->reg.tail = VF_ARQT; - ccq->reg.len = VF_ARQLEN; - ccq->reg.bah = VF_ARQBAH; - ccq->reg.bal = VF_ARQBAL; + ccq->reg.head = VF_ARQH - mbx_start; + ccq->reg.tail = VF_ARQT - mbx_start; + ccq->reg.len = VF_ARQLEN - mbx_start; + ccq->reg.bah = VF_ARQBAH - mbx_start; + ccq->reg.bal = VF_ARQBAL - mbx_start; ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M; ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M; ccq->reg.head_mask = VF_ARQH_ARQH_M; @@ -129,7 +132,7 @@ free_reg_vals: */ static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter) { - adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT); + adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT); adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M; } @@ -148,6 +151,17 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter, } /** + * idpf_idc_vf_register - register for IDC callbacks + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_idc_vf_register(struct idpf_adapter *adapter) +{ + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF); +} + +/** * idpf_vf_reg_ops_init - Initialize register API function pointers * @adapter: Driver specific private structure */ @@ -167,4 +181,11 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter) void idpf_vf_dev_ops_init(struct idpf_adapter *adapter) { idpf_vf_reg_ops_init(adapter); + + adapter->dev_ops.idc_init = idpf_idc_vf_register; + + resource_set_range(&adapter->dev_ops.static_reg_info[0], + VF_BASE, IDPF_VF_MBX_REGION_SZ); + resource_set_range(&adapter->dev_ops.static_reg_info[1], + VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 24febaaa8fbb..a028c69f7fdc 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <linux/export.h> #include <net/libeth/rx.h> #include "idpf.h" @@ -849,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); caps.rss_caps = - cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | - VIRTCHNL2_CAP_RSS_IPV4_UDP | - VIRTCHNL2_CAP_RSS_IPV4_SCTP | - VIRTCHNL2_CAP_RSS_IPV4_OTHER | - VIRTCHNL2_CAP_RSS_IPV6_TCP | - VIRTCHNL2_CAP_RSS_IPV6_UDP | - VIRTCHNL2_CAP_RSS_IPV6_SCTP | - VIRTCHNL2_CAP_RSS_IPV6_OTHER); + cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP | + VIRTCHNL2_FLOW_IPV4_UDP | + VIRTCHNL2_FLOW_IPV4_SCTP | + VIRTCHNL2_FLOW_IPV4_OTHER | + VIRTCHNL2_FLOW_IPV6_TCP | + VIRTCHNL2_FLOW_IPV6_UDP | + VIRTCHNL2_FLOW_IPV6_SCTP | + VIRTCHNL2_FLOW_IPV6_OTHER); caps.hsplit_caps = cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | @@ -868,6 +869,8 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) caps.other_caps = cpu_to_le64(VIRTCHNL2_CAP_SRIOV | + VIRTCHNL2_CAP_RDMA | + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS | VIRTCHNL2_CAP_MACFILTER | VIRTCHNL2_CAP_SPLITQ_QSCHED | VIRTCHNL2_CAP_PROMISC | @@ -891,6 +894,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) } /** + * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg + * @adapter: Driver specific private struct + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree); + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS, + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int num_regions, size; + struct idpf_hw *hw; + ssize_t reply_sz; + int err = 0; + + rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcvd_regions) + return -ENOMEM; + + xn_params.recv_buf.iov_base = rcvd_regions; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + + num_regions = le16_to_cpu(rcvd_regions->num_memory_regions); + size = struct_size(rcvd_regions, mem_reg, num_regions); + if (reply_sz < size) + return -EIO; + + if (size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; + + hw = &adapter->hw; + hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL); + if (!hw->lan_regs) + return -ENOMEM; + + for (int i = 0; i < num_regions; i++) { + hw->lan_regs[i].addr_len = + le64_to_cpu(rcvd_regions->mem_reg[i].size); + hw->lan_regs[i].addr_start = + le64_to_cpu(rcvd_regions->mem_reg[i].start_offset); + } + hw->num_lan_regs = num_regions; + + return err; +} + +/** + * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat + * @adapter: Driver specific private structure + * + * Called when idpf_send_get_lan_memory_regions is not supported. This will + * calculate the offsets and sizes for the regions before, in between, and + * after the mailbox and rstat MMIO mappings. + * + * Return: 0 on success or error code on failure. + */ +static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter) +{ + struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1]; + struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0]; + struct idpf_hw *hw = &adapter->hw; + + hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING; + hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs), + GFP_KERNEL); + if (!hw->lan_regs) + return -ENOMEM; + + /* Region preceding mailbox */ + hw->lan_regs[0].addr_start = 0; + hw->lan_regs[0].addr_len = mbx_reg->start; + /* Region between mailbox and rstat */ + hw->lan_regs[1].addr_start = mbx_reg->end + 1; + hw->lan_regs[1].addr_len = rstat_reg->start - + hw->lan_regs[1].addr_start; + /* Region after rstat */ + hw->lan_regs[2].addr_start = rstat_reg->end + 1; + hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) - + hw->lan_regs[2].addr_start; + + return 0; +} + +/** + * idpf_map_lan_mmio_regs - map remaining LAN BAR regions + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct idpf_hw *hw = &adapter->hw; + resource_size_t res_start; + + res_start = pci_resource_start(pdev, 0); + + for (int i = 0; i < hw->num_lan_regs; i++) { + resource_size_t start; + long len; + + len = hw->lan_regs[i].addr_len; + if (!len) + continue; + start = hw->lan_regs[i].addr_start + res_start; + + hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len); + if (!hw->lan_regs[i].vaddr) { + pci_err(pdev, "failed to allocate BAR0 region\n"); + return -ENOMEM; + } + } + + return 0; +} + +/** + * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message + * @adapter: adapter info struct + * @rule: Flow steering rule to add/delete + * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or + * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid. + * + * Send ADD/DELETE flow steering virtchnl message and receive the result. + * + * Return: 0 on success, negative on failure. + */ +int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter, + struct virtchnl2_flow_rule_add_del *rule, + enum virtchnl2_op opcode) +{ + int rule_count = le32_to_cpu(rule->count); + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; + + if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE && + opcode != VIRTCHNL2_OP_DEL_FLOW_RULE) + return -EINVAL; + + xn_params.vc_op = opcode; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.async = false; + xn_params.send_buf.iov_base = rule; + xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count); + xn_params.recv_buf.iov_base = rule; + xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count); + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + return reply_sz < 0 ? 
reply_sz : 0; +} + +/** * idpf_vport_alloc_max_qs - Allocate max queues for a vport * @adapter: Driver specific private structure * @max_q: vport max queue structure @@ -2801,7 +2961,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter) struct idpf_hw *hw = &adapter->hw; int err; - adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); + adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); if (err) @@ -2961,6 +3121,30 @@ restart: msleep(task_delay); } + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) { + err = idpf_send_get_lan_memory_regions(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", + err); + return -EINVAL; + } + } else { + /* Fallback to mapping the remaining regions of the entire BAR */ + err = idpf_calc_remaining_mmio_regs(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", + err); + return -ENOMEM; + } + } + + err = idpf_map_lan_mmio_regs(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", + err); + return -ENOMEM; + } + pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); num_max_vports = idpf_get_max_vports(adapter); adapter->max_vports = num_max_vports; @@ -3070,6 +3254,7 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) idpf_ptp_release(adapter); idpf_deinit_task(adapter); + idpf_idc_deinit_core_aux_device(adapter->cdev_info); idpf_intr_rel(adapter); if (remove_in_prog) @@ -3493,6 +3678,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, } /** + * idpf_vport_is_cap_ena - Check if vport capability is enabled + * @vport: Private data struct + * @flag: flag(s) to check + * + * Return: true if the capability is supported, false otherwise + */ +bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + + return !!(le16_to_cpu(vport_msg->vport_flags) & flag); +} + +/** + * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type + * @vport: Private data struct + * @flow_type: flow type to check (from ethtool.h) + * + * Return: true if sideband filters are allowed for @flow_type, false otherwise + */ +bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type) +{ + struct virtchnl2_create_vport *vport_msg; + __le64 caps; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + caps = vport_msg->sideband_flow_caps; + + switch (flow_type) { + case TCP_V4_FLOW: + return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP)); + case UDP_V4_FLOW: + return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP)); + default: + return false; + } +} + +/** + * idpf_sideband_action_ena - Check if steering is enabled for action + * @vport: Private data struct + * @fsp: flow spec + * + * Return: true if sideband filters are allowed for @fsp, false otherwise + */ +bool idpf_sideband_action_ena(struct idpf_vport *vport, + struct ethtool_rx_flow_spec *fsp) +{ + struct virtchnl2_create_vport *vport_msg; + unsigned int supp_actions; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions); + + /* Actions Drop/Wake are not supported */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC || + fsp->ring_cookie == RX_CLS_FLOW_WAKE) + return false; + + return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE); +} + +unsigned int 
idpf_fsteer_max_rules(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + return le32_to_cpu(vport_msg->flow_steer_max_rules); +} + +/** * idpf_get_vport_id: Get vport id * @vport: virtual port structure * @@ -3728,3 +3986,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter, return reply_sz < 0 ? reply_sz : 0; } + +/** + * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers + * @cdev_info: IDC core device info pointer + * @send_msg: message to send + * @msg_size: size of message to send + * @recv_msg: message to populate on reception of response + * @recv_len: length of message copied into recv_msg or 0 on error + * + * Return: 0 on success or error code on failure. + */ +int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, + u8 *send_msg, u16 msg_size, + u8 *recv_msg, u16 *recv_len) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + struct idpf_vc_xn_params xn_params = { }; + ssize_t reply_sz; + u16 recv_size; + + if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; + + recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN); + *recv_len = 0; + xn_params.vc_op = VIRTCHNL2_OP_RDMA; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = send_msg; + xn_params.send_buf.iov_len = msg_size; + xn_params.recv_buf.iov_base = recv_msg; + xn_params.recv_buf.iov_len = recv_size; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + *recv_len = reply_sz; + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 77578206bada..86f30f0db07a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -105,6 +105,12 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport, int idpf_queue_reg_init(struct idpf_vport *vport); int idpf_vport_queue_ids_init(struct idpf_vport *vport); +bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag); +bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type); +bool idpf_sideband_action_ena(struct idpf_vport *vport, + struct ethtool_rx_flow_spec *fsp); +unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport); + int idpf_recv_mb_msg(struct idpf_adapter *adapter); int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, u16 msg_size, u8 *msg, u16 cookie); @@ -151,5 +157,8 @@ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); +int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, + u8 *send_msg, u16 msg_size, + u8 *recv_msg, u16 *recv_len); #endif /* _IDPF_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c index bdcc54a5fb56..4f1fb0cefe51 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c @@ -30,6 +30,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter) .send_buf.iov_len = sizeof(send_ptp_caps_msg), .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, }; + struct virtchnl2_ptp_cross_time_reg_offsets 
cross_tstamp_offsets; struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets; struct virtchnl2_ptp_clk_reg_offsets clock_offsets; struct idpf_ptp_secondary_mbx *scnd_mbx; @@ -71,7 +72,7 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter) access_type = ptp->get_dev_clk_time_access; if (access_type != IDPF_PTP_DIRECT) - goto discipline_clock; + goto cross_tstamp; clock_offsets = recv_ptp_caps_msg->clk_offsets; @@ -90,6 +91,22 @@ int idpf_ptp_get_caps(struct idpf_adapter *adapter) temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger); ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); +cross_tstamp: + access_type = ptp->get_cross_tstamp_access; + if (access_type != IDPF_PTP_DIRECT) + goto discipline_clock; + + cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets; + + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l); + ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h); + ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + discipline_clock: access_type = ptp->adj_dev_clk_time_access; if (access_type != IDPF_PTP_DIRECT) @@ -163,6 +180,42 @@ int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, } /** + * idpf_ptp_get_cross_time - Send virtchnl get cross time message + * @adapter: Driver specific private structure + * @cross_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get cross time message to get the time of the clock and the + * system time. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time) +{ + struct virtchnl2_ptp_get_cross_time cross_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME, + .send_buf.iov_base = &cross_time_msg, + .send_buf.iov_len = sizeof(cross_time_msg), + .recv_buf.iov_base = &cross_time_msg, + .recv_buf.iov_len = sizeof(cross_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(cross_time_msg)) + return -EIO; + + cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns); + cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns); + + return 0; +} + +/** * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message * @adapter: Driver specific private structure * @time: New time value diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 11b8f6f05799..02ae447cc24a 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -62,8 +62,9 @@ enum virtchnl2_op { VIRTCHNL2_OP_GET_PTYPE_INFO = 526, /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW. - * Opcodes 529, 530, 531, 532 and 533 are reserved. */ + VIRTCHNL2_OP_RDMA = 529, + /* Opcodes 530 through 533 are reserved. 
*/ VIRTCHNL2_OP_LOOPBACK = 534, VIRTCHNL2_OP_ADD_MAC_ADDR = 535, VIRTCHNL2_OP_DEL_MAC_ADDR = 536, @@ -78,6 +79,11 @@ enum virtchnl2_op { VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546, VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547, VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548, + VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549, + /* Opcode 550 is reserved */ + VIRTCHNL2_OP_ADD_FLOW_RULE = 551, + VIRTCHNL2_OP_GET_FLOW_RULE = 552, + VIRTCHNL2_OP_DEL_FLOW_RULE = 553, }; /** @@ -151,22 +157,22 @@ enum virtchnl2_cap_seg { VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8), }; -/* Receive Side Scaling Flow type capability flags */ -enum virtchnl2_cap_rss { - VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0), - VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1), - VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2), - VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3), - VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4), - VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5), - VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6), - VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7), - VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8), - VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9), - VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10), - VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11), - VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12), - VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13), +/* Receive Side Scaling and Flow Steering Flow type capability flags */ +enum virtchnl2_flow_types { + VIRTCHNL2_FLOW_IPV4_TCP = BIT(0), + VIRTCHNL2_FLOW_IPV4_UDP = BIT(1), + VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2), + VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3), + VIRTCHNL2_FLOW_IPV6_TCP = BIT(4), + VIRTCHNL2_FLOW_IPV6_UDP = BIT(5), + VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6), + VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7), + VIRTCHNL2_FLOW_IPV4_AH = BIT(8), + VIRTCHNL2_FLOW_IPV4_ESP = BIT(9), + VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10), + VIRTCHNL2_FLOW_IPV6_AH = BIT(11), + VIRTCHNL2_FLOW_IPV6_ESP = BIT(12), + VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13), }; /* Header split capability flags */ @@ -192,8 +198,9 @@ enum virtchnl2_cap_other { VIRTCHNL2_CAP_RDMA = BIT_ULL(0), VIRTCHNL2_CAP_SRIOV = BIT_ULL(1), VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2), - VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3), - /* Queue based scheduling using split queue model */ + /* Other capability 3 is available + * Queue based scheduling using split queue model + */ VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4), VIRTCHNL2_CAP_CRC = BIT_ULL(5), VIRTCHNL2_CAP_ADQ = BIT_ULL(6), @@ -207,16 +214,37 @@ enum virtchnl2_cap_other { /* EDT: Earliest Departure Time capability used for Timing Wheel */ VIRTCHNL2_CAP_EDT = BIT_ULL(14), VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15), - VIRTCHNL2_CAP_FDIR = BIT_ULL(16), + /* Other capability 16 is available */ VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17), VIRTCHNL2_CAP_PTYPE = BIT_ULL(18), VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19), /* Other capability 20 is reserved */ + VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21), + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22), /* this must be the last capability */ VIRTCHNL2_CAP_OEM = BIT_ULL(63), }; +/** + * enum virtchnl2_action_types - Available actions for sideband flow steering + * @VIRTCHNL2_ACTION_DROP: Drop the packet + * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage + * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue + * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group + * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value + * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter + */ + +enum virtchnl2_action_types { + VIRTCHNL2_ACTION_DROP = BIT(0), + VIRTCHNL2_ACTION_PASSTHRU = BIT(1), + VIRTCHNL2_ACTION_QUEUE = BIT(2), + VIRTCHNL2_ACTION_Q_GROUP = 
BIT(3), + VIRTCHNL2_ACTION_MARK = BIT(4), + VIRTCHNL2_ACTION_COUNT = BIT(5), +}; + /* underlying device type */ enum virtchl2_device_type { VIRTCHNL2_MEV_DEVICE = 0, @@ -458,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info); * @seg_caps: See enum virtchnl2_cap_seg. * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at. * @rsc_caps: See enum virtchnl2_cap_rsc. - * @rss_caps: See enum virtchnl2_cap_rss. + * @rss_caps: See enum virtchnl2_flow_types. * @other_caps: See enum virtchnl2_cap_other. * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox * provided by CP. @@ -483,6 +511,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info); * segment offload. * @max_hdr_buf_per_lso: Max number of header buffers that can be used for * an LSO. + * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for + * the device. * @pad1: Padding for future extensions. * * Dataplane driver sends this message to CP to negotiate capabilities and @@ -530,7 +560,8 @@ struct virtchnl2_get_capabilities { __le32 device_type; u8 min_sso_packet_len; u8 max_hdr_buf_per_lso; - u8 pad1[10]; + __le16 num_rdma_allocated_vectors; + u8 pad1[8]; }; VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities); @@ -572,9 +603,18 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); /** * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities. * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports + * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled + * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled + * with explicit Rx queue action + * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled + * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport */ enum virtchnl2_vport_flags { - VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), + VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), + VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1), + VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2), + VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3), + VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4), }; /** @@ -599,6 +639,14 @@ enum virtchnl2_vport_flags { * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. * @pad1: Padding. + * @inline_flow_caps: Bit mask of supported inline-flow-steering + * flow types (See enum virtchnl2_flow_types) + * @sideband_flow_caps: Bit mask of supported sideband-flow-steering + * flow types (See enum virtchnl2_flow_types) + * @sideband_flow_actions: Bit mask of supported action types + * for sideband flow steering (See enum virtchnl2_action_types) + * @flow_steer_max_rules: Max rules allowed for inline and sideband + * flow steering combined * @rss_algorithm: RSS algorithm. * @rss_key_size: RSS key size. * @rss_lut_size: RSS LUT size. 
@@ -631,7 +679,11 @@ struct virtchnl2_create_vport { __le16 vport_flags; __le64 rx_desc_ids; __le64 tx_desc_ids; - u8 pad1[72]; + u8 pad1[48]; + __le64 inline_flow_caps; + __le64 sideband_flow_caps; + __le32 sideband_flow_actions; + __le32 flow_steer_max_rules; __le32 rss_algorithm; __le16 rss_key_size; __le16 rss_lut_size; @@ -1580,4 +1632,182 @@ struct virtchnl2_ptp_adj_dev_clk_time { }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time); +/** + * struct virtchnl2_mem_region - MMIO memory region + * @start_offset: starting offset of the MMIO memory region + * @size: size of the MMIO memory region + */ +struct virtchnl2_mem_region { + __le64 start_offset; + __le64 size; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region); + +/** + * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions + * @num_memory_regions: number of memory regions + * @pad: Padding + * @mem_reg: List with memory region info + * + * PF/VF sends this message to learn what LAN MMIO memory regions it should map. + */ +struct virtchnl2_get_lan_memory_regions { + __le16 num_memory_regions; + u8 pad[6]; + struct virtchnl2_mem_region mem_reg[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions); + +#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4 +#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256 +#define VIRTCHNL2_MAX_NUM_ACTIONS 8 + +/** + * struct virtchnl2_proto_hdr - represent one protocol header + * @hdr_type: See enum virtchnl2_proto_hdr_type + * @pad: padding + * @buffer_spec: binary buffer based on header type. + * @buffer_mask: mask applied on buffer_spec. + * + * Structure to hold protocol headers based on hdr_type + */ +struct virtchnl2_proto_hdr { + __le32 hdr_type; + u8 pad[4]; + u8 buffer_spec[64]; + u8 buffer_mask[64]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr); + +/** + * struct virtchnl2_proto_hdrs - struct to represent match criteria + * @tunnel_level: specify where protocol header(s) start from. + * must be 0 when sending a raw packet request. + * 0 - from the outer layer + * 1 - from the first inner layer + * 2 - from the second inner layer + * @pad: Padding bytes + * @count: total number of protocol headers in proto_hdr. 0 for raw packet. + * @proto_hdr: Array of protocol headers + * @raw: struct holding raw packet buffer when count is 0 + */ +struct virtchnl2_proto_hdrs { + u8 tunnel_level; + u8 pad[3]; + __le32 count; + union { + struct virtchnl2_proto_hdr + proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS]; + struct { + __le16 pkt_len; + u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET]; + u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET]; + } raw; + }; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs); + +/** + * struct virtchnl2_rule_action - struct representing single action for a flow + * @action_type: see enum virtchnl2_action_types + * @act_conf: union representing action depending on action_type. + * @act_conf.q_id: queue id to redirect the packets to. + * @act_conf.q_grp_id: queue group id to redirect the packets to. + * @act_conf.ctr_id: used for count action. If input value 0xFFFFFFFF control + * plane assigns a new counter and returns the counter ID to + * the driver. If input value is not 0xFFFFFFFF then it must + * be an existing counter given to the driver for an earlier + * flow. Then this flow will share the counter. + * @act_conf.mark_id: Value used to mark the packets. Used for mark action. + * @act_conf.reserved: Reserved for future use. 
+ */ +struct virtchnl2_rule_action { + __le32 action_type; + union { + __le32 q_id; + __le32 q_grp_id; + __le32 ctr_id; + __le32 mark_id; + u8 reserved[8]; + } act_conf; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action); + +/** + * struct virtchnl2_rule_action_set - struct representing multiple actions + * @count: number of valid actions in the action set of a rule + * @actions: array of struct virtchnl2_rule_action + */ +struct virtchnl2_rule_action_set { + /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */ + __le32 count; + struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set); + +/** + * struct virtchnl2_flow_rule - represent one flow steering rule + * @proto_hdrs: array of protocol header buffers representing match criteria + * @action_set: series of actions to be applied for given rule + * @priority: rule priority. + * @pad: padding for future extensions. + */ +struct virtchnl2_flow_rule { + struct virtchnl2_proto_hdrs proto_hdrs; + struct virtchnl2_rule_action_set action_set; + __le32 priority; + u8 pad[8]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule); + +enum virtchnl2_flow_rule_status { + VIRTCHNL2_FLOW_RULE_SUCCESS = 1, + VIRTCHNL2_FLOW_RULE_NORESOURCE = 2, + VIRTCHNL2_FLOW_RULE_EXIST = 3, + VIRTCHNL2_FLOW_RULE_TIMEOUT = 4, + VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5, + VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6, + VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7, + VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8, + VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9, + VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10, +}; + +/** + * struct virtchnl2_flow_rule_info - structure representing a single flow rule + * @rule_id: rule_id associated with the flow_rule. + * @rule_cfg: structure representing rule. + * @status: status of rule programming. See enum virtchnl2_flow_rule_status. + */ +struct virtchnl2_flow_rule_info { + __le32 rule_id; + struct virtchnl2_flow_rule rule_cfg; + __le32 status; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info); + +/** + * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule + * @vport_id: vport id for which the rule is to be added or deleted. + * @count: Indicates number of rules to be added or deleted. + * @rule_info: Array of flow rules to be added or deleted. + * + * For VIRTCHNL2_OP_ADD_FLOW_RULE, rule_info contains the list of rules to be + * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached. + * + * For VIRTCHNL2_OP_DEL_FLOW_RULE, there are two possibilities. The structure + * can contain either an array of rule_ids or an array of match keys to be deleted. + * When match keys are used the corresponding rule_ids must be 0xFFFFFFFF. + * + * The status member of each rule indicates the result. Maximum of 6 rules can be + * added or deleted using this method. Driver has to retry in case of any + * failure of ADD or DEL opcode. CP doesn't retry in case of failure. + */ +struct virtchnl2_flow_rule_add_del { + __le32 vport_id; + __le32 count; + struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count); +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del); + #endif /* _VIRTCHNL_2_H_ */
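Editor's note: the following is a minimal usage sketch, not part of the patch series above. Assuming the virtchnl2 flow-rule structures and the idpf_add_del_fsteer_filters() prototype added in this diff, a caller could size a one-rule VIRTCHNL2_OP_ADD_FLOW_RULE message with struct_size() and steer matching traffic to a single Rx queue roughly as below. The helper name example_add_one_queue_rule is hypothetical, and the match keys (rule_cfg.proto_hdrs) are left out for brevity.

/* Hypothetical illustration only; assumes the idpf driver headers from this
 * series (idpf.h, idpf_virtchnl.h, virtchnl2.h) are included.
 */
static int example_add_one_queue_rule(struct idpf_vport *vport, u32 q_index)
{
	struct virtchnl2_flow_rule_add_del *msg;
	struct virtchnl2_rule_action *act;
	size_t sz = struct_size(msg, rule_info, 1);
	int err;

	msg = kzalloc(sz, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->vport_id = cpu_to_le32(vport->vport_id);
	msg->count = cpu_to_le32(1);
	/* 0xFFFFFFFF asks the control plane to program the rule without caching it */
	msg->rule_info[0].rule_id = cpu_to_le32(U32_MAX);
	/* Match criteria (rule_cfg.proto_hdrs) omitted here; a real rule must
	 * describe the flow to steer, e.g. an IPv4/TCP 4-tuple.
	 */
	msg->rule_info[0].rule_cfg.action_set.count = cpu_to_le32(1);
	act = &msg->rule_info[0].rule_cfg.action_set.actions[0];
	act->action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE);
	act->act_conf.q_id = cpu_to_le32(q_index);

	err = idpf_add_del_fsteer_filters(vport->adapter, msg,
					  VIRTCHNL2_OP_ADD_FLOW_RULE);
	kfree(msg);
	return err;
}

This mirrors how idpf_add_del_fsteer_filters() in the diff computes the send/receive buffer length, struct_size(rule, rule_info, count), so the message size always matches the number of rules carried.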