Diffstat (limited to 'drivers/net/ethernet/intel/idpf')
24 files changed, 4833 insertions, 415 deletions
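The idpf.h changes in the diff below add an idpf_for_each_vport() iterator over adapter->vports[]. A minimal usage sketch follows; the walk_vports() caller is hypothetical and only illustrates the calling convention implied by the macro definition (the walk ends at the first unused/NULL slot or at max_vports):

static void walk_vports(struct idpf_adapter *adapter)
{
	struct idpf_vport *vport;

	/* Visits each allocated vport in adapter->vports[]; the loop
	 * condition ends the walk at the first NULL entry, so vport is
	 * always valid inside the body.
	 */
	idpf_for_each_vport(adapter, vport)
		dev_dbg(&adapter->pdev->dev, "vport id %u, %u TX queues\n",
			vport->vport_id, vport->num_txq);
}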
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig index 1addd663acad..2c359a8551c7 100644 --- a/drivers/net/ethernet/intel/idpf/Kconfig +++ b/drivers/net/ethernet/intel/idpf/Kconfig @@ -4,6 +4,7 @@ config IDPF tristate "Intel(R) Infrastructure Data Path Function Support" depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL select DIMLIB select LIBETH help diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile index 2ce01a0b5898..4ef4b2b5e37a 100644 --- a/drivers/net/ethernet/intel/idpf/Makefile +++ b/drivers/net/ethernet/intel/idpf/Makefile @@ -10,6 +10,7 @@ idpf-y := \ idpf_controlq_setup.o \ idpf_dev.o \ idpf_ethtool.o \ + idpf_idc.o \ idpf_lib.o \ idpf_main.o \ idpf_txrx.o \ @@ -17,3 +18,6 @@ idpf-y := \ idpf_vf_dev.o idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o + +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_ptp.o +idpf-$(CONFIG_PTP_1588_CLOCK) += idpf_virtchnl_ptp.o diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 66544faab710..f4c0eaf9bde3 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -12,12 +12,16 @@ struct idpf_vport_max_q; #include <net/pkt_sched.h> #include <linux/aer.h> #include <linux/etherdevice.h> +#include <linux/ioport.h> #include <linux/pci.h> #include <linux/bitfield.h> #include <linux/sctp.h> #include <linux/ethtool_netlink.h> #include <net/gro.h> +#include <linux/net/intel/iidc_rdma.h> +#include <linux/net/intel/iidc_rdma_idpf.h> + #include "virtchnl2.h" #include "idpf_txrx.h" #include "idpf_controlq.h" @@ -143,6 +147,7 @@ enum idpf_vport_state { * @vport_id: Vport identifier * @link_speed_mbps: Link speed in mbps * @vport_idx: Relative vport index + * @max_tx_hdr_size: Max header length hardware can support * @state: See enum idpf_vport_state * @netstats: Packet and byte stats * @stats_lock: Lock to protect stats update @@ -153,6 +158,7 @@ struct idpf_netdev_priv { u32 vport_id; u32 link_speed_mbps; u16 vport_idx; + u16 max_tx_hdr_size; enum idpf_vport_state state; struct rtnl_link_stats64 netstats; spinlock_t stats_lock; @@ -189,22 +195,38 @@ struct idpf_vport_max_q { * @mb_intr_reg_init: Mailbox interrupt register initialization * @reset_reg_init: Reset register initialization * @trigger_reset: Trigger a reset to occur + * @ptp_reg_init: PTP register initialization */ struct idpf_reg_ops { - void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq); + void (*ctlq_reg_init)(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq); int (*intr_reg_init)(struct idpf_vport *vport); void (*mb_intr_reg_init)(struct idpf_adapter *adapter); void (*reset_reg_init)(struct idpf_adapter *adapter); void (*trigger_reset)(struct idpf_adapter *adapter, enum idpf_flags trig_cause); + void (*ptp_reg_init)(const struct idpf_adapter *adapter); }; +#define IDPF_MMIO_REG_NUM_STATIC 2 +#define IDPF_PF_MBX_REGION_SZ 4096 +#define IDPF_PF_RSTAT_REGION_SZ 2048 +#define IDPF_VF_MBX_REGION_SZ 10240 +#define IDPF_VF_RSTAT_REGION_SZ 2048 + /** * struct idpf_dev_ops - Device specific operations * @reg_ops: Register operations + * @idc_init: IDC initialization + * @static_reg_info: array of mailbox and rstat register info */ struct idpf_dev_ops { struct idpf_reg_ops reg_ops; + + int (*idc_init)(struct idpf_adapter *adapter); + + /* static_reg_info[0] is mailbox region, static_reg_info[1] is rstat */ + struct resource static_reg_info[IDPF_MMIO_REG_NUM_STATIC]; }; /** @@ -247,6 +269,12 @@ struct 
idpf_port_stats { struct virtchnl2_vport_stats vport_stats; }; +struct idpf_fsteer_fltr { + struct list_head list; + u32 loc; + u32 q_index; +}; + /** * struct idpf_vport - Handle for netdevices and queue resources * @num_txq: Number of allocated TX queues @@ -271,6 +299,7 @@ struct idpf_port_stats { * group will yield total number of RX queues. * @rxq_model: Splitq queue or single queue queuing model * @rx_ptype_lkup: Lookup table for ptypes on RX + * @vdev_info: IDC vport device info pointer * @adapter: back pointer to associated adapter * @netdev: Associated net_device. Each vport should have one and only one * associated netdev. @@ -290,6 +319,9 @@ struct idpf_port_stats { * @port_stats: per port csum, header split, and other offload stats * @link_up: True if link is up * @sw_marker_wq: workqueue for marker packets + * @tx_tstamp_caps: Capabilities negotiated for Tx timestamping + * @tstamp_config: The Tx tstamp config + * @tstamp_task: Tx timestamping task */ struct idpf_vport { u16 num_txq; @@ -313,6 +345,8 @@ struct idpf_vport { u32 rxq_model; struct libeth_rx_pt *rx_ptype_lkup; + struct iidc_rdma_vport_dev_info *vdev_info; + struct idpf_adapter *adapter; struct net_device *netdev; DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS); @@ -334,6 +368,10 @@ struct idpf_vport { bool link_up; wait_queue_head_t sw_marker_wq; + + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct kernel_hwtstamp_config tstamp_config; + struct work_struct tstamp_task; }; /** @@ -368,9 +406,27 @@ struct idpf_rss_data { }; /** + * struct idpf_q_coalesce - User defined coalescing configuration values for + * a single queue. + * @tx_intr_mode: Dynamic TX ITR or not + * @rx_intr_mode: Dynamic RX ITR or not + * @tx_coalesce_usecs: TX interrupt throttling rate + * @rx_coalesce_usecs: RX interrupt throttling rate + * + * Used to restore user coalescing configuration after a reset. + */ +struct idpf_q_coalesce { + u32 tx_intr_mode; + u32 rx_intr_mode; + u32 tx_coalesce_usecs; + u32 rx_coalesce_usecs; +}; + +/** * struct idpf_vport_user_config_data - User defined configuration values for * each vport. * @rss_data: See struct idpf_rss_data + * @q_coalesce: Array of per queue coalescing data * @num_req_tx_qs: Number of user requested TX queues through ethtool * @num_req_rx_qs: Number of user requested RX queues through ethtool * @num_req_txq_desc: Number of user requested TX queue descriptors through @@ -379,17 +435,22 @@ struct idpf_rss_data { * ethtool * @user_flags: User toggled config flags * @mac_filter_list: List of MAC filters + * @num_fsteer_fltrs: number of flow steering filters + * @flow_steer_list: list of flow steering filters * * Used to restore configuration after a reset as the vport will get wiped. */ struct idpf_vport_user_config_data { struct idpf_rss_data rss_data; + struct idpf_q_coalesce *q_coalesce; u16 num_req_tx_qs; u16 num_req_rx_qs; u32 num_req_txq_desc; u32 num_req_rxq_desc; DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS); struct list_head mac_filter_list; + u32 num_fsteer_fltrs; + struct list_head flow_steer_list; }; /** @@ -478,6 +539,13 @@ struct idpf_vport_config { struct idpf_vc_xn_manager; +#define idpf_for_each_vport(adapter, iter) \ + for (struct idpf_vport **__##iter = &(adapter)->vports[0], \ + *iter = (adapter)->max_vports ? *__##iter : NULL; \ + iter; \ + iter = (++__##iter) < &(adapter)->vports[(adapter)->max_vports] ? 
\ + *__##iter : NULL) + /** * struct idpf_adapter - Device data struct generated on probe * @pdev: PCI device struct given on probe @@ -489,10 +557,11 @@ struct idpf_vc_xn_manager; * @flags: See enum idpf_flags * @reset_reg: See struct idpf_reset_reg * @hw: Device access data - * @num_req_msix: Requested number of MSIX vectors * @num_avail_msix: Available number of MSIX vectors * @num_msix_entries: Number of entries in MSIX table * @msix_entries: MSIX table + * @num_rdma_msix_entries: Available number of MSIX vectors for RDMA + * @rdma_msix_entries: RDMA MSIX table * @req_vec_chunks: Requested vector chunk data * @mb_vector: Mailbox vector data * @vector_stack: Stack to store the msix vector indexes @@ -521,6 +590,7 @@ struct idpf_vc_xn_manager; * @caps: Negotiated capabilities with device * @vcxn_mngr: Virtchnl transaction manager * @dev_ops: See idpf_dev_ops + * @cdev_info: IDC core device info pointer * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk * to VFs but is used to initialize them * @crc_enable: Enable CRC insertion offload @@ -530,6 +600,7 @@ struct idpf_vc_xn_manager; * @vector_lock: Lock to protect vector distribution * @queue_lock: Lock to protect queue distribution * @vc_buf_lock: Lock to protect virtchnl buffer + * @ptp: Storage for PTP-related data */ struct idpf_adapter { struct pci_dev *pdev; @@ -542,10 +613,11 @@ struct idpf_adapter { DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS); struct idpf_reset_reg reset_reg; struct idpf_hw hw; - u16 num_req_msix; u16 num_avail_msix; u16 num_msix_entries; struct msix_entry *msix_entries; + u16 num_rdma_msix_entries; + struct msix_entry *rdma_msix_entries; struct virtchnl2_alloc_vectors *req_vec_chunks; struct idpf_q_vector mb_vector; struct idpf_vector_lifo vector_stack; @@ -578,6 +650,7 @@ struct idpf_adapter { struct idpf_vc_xn_manager *vcxn_mngr; struct idpf_dev_ops dev_ops; + struct iidc_rdma_core_dev_info *cdev_info; int num_vfs; bool crc_enable; bool req_tx_splitq; @@ -587,6 +660,8 @@ struct idpf_adapter { struct mutex vector_lock; struct mutex queue_lock; struct mutex vc_buf_lock; + + struct idpf_ptp *ptp; }; /** @@ -609,17 +684,28 @@ static inline int idpf_is_queue_model_split(u16 q_model) bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, enum idpf_cap_field field, u64 flag); +/** + * idpf_is_rdma_cap_ena - Determine if RDMA is supported + * @adapter: private data struct + * + * Return: true if RDMA capability is enabled, false otherwise + */ +static inline bool idpf_is_rdma_cap_ena(struct idpf_adapter *adapter) +{ + return idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_RDMA); +} + #define IDPF_CAP_RSS (\ - VIRTCHNL2_CAP_RSS_IPV4_TCP |\ - VIRTCHNL2_CAP_RSS_IPV4_TCP |\ - VIRTCHNL2_CAP_RSS_IPV4_UDP |\ - VIRTCHNL2_CAP_RSS_IPV4_SCTP |\ - VIRTCHNL2_CAP_RSS_IPV4_OTHER |\ - VIRTCHNL2_CAP_RSS_IPV6_TCP |\ - VIRTCHNL2_CAP_RSS_IPV6_TCP |\ - VIRTCHNL2_CAP_RSS_IPV6_UDP |\ - VIRTCHNL2_CAP_RSS_IPV6_SCTP |\ - VIRTCHNL2_CAP_RSS_IPV6_OTHER) + VIRTCHNL2_FLOW_IPV4_TCP |\ + VIRTCHNL2_FLOW_IPV4_TCP |\ + VIRTCHNL2_FLOW_IPV4_UDP |\ + VIRTCHNL2_FLOW_IPV4_SCTP |\ + VIRTCHNL2_FLOW_IPV4_OTHER |\ + VIRTCHNL2_FLOW_IPV6_TCP |\ + VIRTCHNL2_FLOW_IPV6_TCP |\ + VIRTCHNL2_FLOW_IPV6_UDP |\ + VIRTCHNL2_FLOW_IPV6_SCTP |\ + VIRTCHNL2_FLOW_IPV6_OTHER) #define IDPF_CAP_RSC (\ VIRTCHNL2_CAP_RSC_IPV4_TCP |\ @@ -629,13 +715,13 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6) -#define IDPF_CAP_RX_CSUM_L4V4 (\ - 
VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP) +#define IDPF_CAP_TX_CSUM_L4V4 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP) -#define IDPF_CAP_RX_CSUM_L4V6 (\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) +#define IDPF_CAP_TX_CSUM_L4V6 (\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |\ + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP) #define IDPF_CAP_RX_CSUM (\ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\ @@ -644,11 +730,9 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP) -#define IDPF_CAP_SCTP_CSUM (\ +#define IDPF_CAP_TX_SCTP_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\ - VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP) + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP) #define IDPF_CAP_TUNNEL_TX_CSUM (\ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\ @@ -664,6 +748,17 @@ static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter) } /** + * idpf_get_reserved_rdma_vecs - Get reserved RDMA vectors + * @adapter: private data struct + * + * Return: number of vectors reserved for RDMA + */ +static inline u16 idpf_get_reserved_rdma_vecs(struct idpf_adapter *adapter) +{ + return le16_to_cpu(adapter->caps.num_rdma_allocated_vectors); +} + +/** * idpf_get_default_vports - Get default number of vports * @adapter: private data struct */ @@ -702,6 +797,34 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) } /** + * idpf_get_mbx_reg_addr - Get BAR0 mailbox register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Return: BAR0 mailbox register address based on register offset. + */ +static inline void __iomem *idpf_get_mbx_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + return adapter->hw.mbx.vaddr + reg_offset; +} + +/** + * idpf_get_rstat_reg_addr - Get BAR0 rstat register address + * @adapter: private data struct + * @reg_offset: register offset value + * + * Return: BAR0 rstat register address based on register offset. + */ +static inline void __iomem *idpf_get_rstat_reg_addr(struct idpf_adapter *adapter, + resource_size_t reg_offset) +{ + reg_offset -= adapter->dev_ops.static_reg_info[1].start; + + return adapter->hw.rstat.vaddr + reg_offset; +} + +/** * idpf_get_reg_addr - Get BAR0 register address * @adapter: private data struct * @reg_offset: register offset value @@ -711,7 +834,30 @@ static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter) static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter, resource_size_t reg_offset) { - return (void __iomem *)(adapter->hw.hw_addr + reg_offset); + struct idpf_hw *hw = &adapter->hw; + + for (int i = 0; i < hw->num_lan_regs; i++) { + struct idpf_mmio_reg *region = &hw->lan_regs[i]; + + if (reg_offset >= region->addr_start && + reg_offset < (region->addr_start + region->addr_len)) { + /* Convert the offset so that it is relative to the + * start of the region. Then add the base address of + * the region to get the final address. + */ + reg_offset -= region->addr_start; + + return region->vaddr + reg_offset; + } + } + + /* It's impossible to hit this case with offsets from the CP. But if we + * do for any other reason, the kernel will panic on that register + * access. Might as well do it here to make it clear what's happening. 
+ */ + BUG(); + + return NULL; } /** @@ -725,7 +871,7 @@ static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter) if (!adapter->hw.arq) return true; - return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) & + return !(readl(idpf_get_mbx_reg_addr(adapter, adapter->hw.arq->reg.len)) & adapter->hw.arq->reg.len_mask); } @@ -834,5 +980,16 @@ int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); bool idpf_vport_set_hsplit(const struct idpf_vport *vport, u8 val); - +int idpf_idc_init(struct idpf_adapter *adapter); +int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, + enum iidc_function_type ftype); +void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info); +void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info); +void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info); +void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, + enum iidc_rdma_event_type event_type); + +int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter, + struct virtchnl2_flow_rule_add_del *rule, + enum virtchnl2_op opcode); #endif /* !_IDPF_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index b28991dd1870..67894eda2d29 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -36,19 +36,19 @@ static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, { /* Update tail to post pre-allocated buffers for rx queues */ if (is_rxq) - wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); + idpf_mbx_wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); /* For non-Mailbox control queues only TAIL need to be set */ if (cq->q_id != -1) return; /* Clear Head for both send or receive */ - wr32(hw, cq->reg.head, 0); + idpf_mbx_wr32(hw, cq->reg.head, 0); /* set starting point */ - wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); - wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); - wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); + idpf_mbx_wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa)); + idpf_mbx_wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa)); + idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); } /** @@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) */ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) { - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* free ring buffers and the ring itself */ idpf_ctlq_dealloc_ring_res(hw, cq); @@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) /* Set ring_size to 0 to indicate uninitialized queue */ cq->ring_size = 0; - mutex_unlock(&cq->cq_lock); - mutex_destroy(&cq->cq_lock); + spin_unlock(&cq->cq_lock); } /** @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, idpf_ctlq_init_regs(hw, cq, is_rxq); - mutex_init(&cq->cq_lock); + spin_lock_init(&cq->cq_lock); list_add(&cq->cq_list, &hw->cq_list_head); @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, int err = 0; int i; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* Ensure there are enough descriptors to send all messages */ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); @@ -329,10 +328,10 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, */ dma_wmb(); - wr32(hw, cq->reg.tail, 
cq->next_to_use); + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use); err_unlock: - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); return err; } @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (*clean_count > cq->ring_size) return -EBADR; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* Return number of descriptors actually cleaned */ *clean_count = i; @@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, if (*buff_count > 0) buffs_avail = true; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); if (tbp >= cq->ring_size) tbp = 0; @@ -521,10 +520,10 @@ post_buffs_out: dma_wmb(); - wr32(hw, cq->reg.tail, cq->next_to_post); + idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_post); } - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* return the number of buffers that were not posted */ *buff_count = *buff_count - i; @@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, u16 i; /* take the lock before we start messing with the ring */ - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); *num_q_msg = i; if (*num_q_msg == 0) diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h index c1aba09e9856..de4ece40c2ff 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h @@ -94,12 +94,26 @@ struct idpf_mbxq_desc { u32 pf_vf_id; /* used by CP when sending to PF */ }; +/* Max number of MMIO regions not including the mailbox and rstat regions in + * the fallback case when the whole bar is mapped. 
+ */ +#define IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING 3 + +struct idpf_mmio_reg { + void __iomem *vaddr; + resource_size_t addr_start; + resource_size_t addr_len; +}; + /* Define the driver hardware struct to replace other control structs as needed * Align to ctlq_hw_info */ struct idpf_hw { - void __iomem *hw_addr; - resource_size_t hw_addr_len; + struct idpf_mmio_reg mbx; + struct idpf_mmio_reg rstat; + /* Array of remaining LAN BAR regions */ + int num_lan_regs; + struct idpf_mmio_reg *lan_regs; struct idpf_adapter *back; diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d..3414c5f9a831 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -99,7 +99,7 @@ struct idpf_ctlq_info { enum idpf_ctlq_type cq_type; int q_id; - struct mutex cq_lock; /* control queue lock */ + spinlock_t cq_lock; /* control queue lock */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; @@ -123,9 +123,12 @@ struct idpf_ctlq_info { /** * enum idpf_mbx_opc - PF/VF mailbox commands * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP + * @idpf_mbq_opc_send_msg_to_peer_drv: used by PF or VF to send a message to + * any peer driver */ enum idpf_mbx_opc { idpf_mbq_opc_send_msg_to_cp = 0x0801, + idpf_mbq_opc_send_msg_to_peer_drv = 0x0804, }; /* API supported for control queue management */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 41e4bd49402a..bfa60f7d43de 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -4,15 +4,19 @@ #include "idpf.h" #include "idpf_lan_pf_regs.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" #define IDPF_PF_ITR_IDX_SPACING 0x4 /** * idpf_ctlq_reg_init - initialize default mailbox registers + * @adapter: adapter structure * @cq: pointer to the array of create control queues */ -static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +static void idpf_ctlq_reg_init(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq) { + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; int i; for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { @@ -21,22 +25,22 @@ static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) switch (ccq->type) { case IDPF_CTLQ_TYPE_MAILBOX_TX: /* set head and tail registers in our local struct */ - ccq->reg.head = PF_FW_ATQH; - ccq->reg.tail = PF_FW_ATQT; - ccq->reg.len = PF_FW_ATQLEN; - ccq->reg.bah = PF_FW_ATQBAH; - ccq->reg.bal = PF_FW_ATQBAL; + ccq->reg.head = PF_FW_ATQH - mbx_start; + ccq->reg.tail = PF_FW_ATQT - mbx_start; + ccq->reg.len = PF_FW_ATQLEN - mbx_start; + ccq->reg.bah = PF_FW_ATQBAH - mbx_start; + ccq->reg.bal = PF_FW_ATQBAL - mbx_start; ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M; ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; ccq->reg.head_mask = PF_FW_ATQH_ATQH_M; break; case IDPF_CTLQ_TYPE_MAILBOX_RX: /* set head and tail registers in our local struct */ - ccq->reg.head = PF_FW_ARQH; - ccq->reg.tail = PF_FW_ARQT; - ccq->reg.len = PF_FW_ARQLEN; - ccq->reg.bah = PF_FW_ARQBAH; - ccq->reg.bal = PF_FW_ARQBAL; + ccq->reg.head = PF_FW_ARQH - mbx_start; + ccq->reg.tail = PF_FW_ARQT - mbx_start; + ccq->reg.len = PF_FW_ARQLEN - mbx_start; + ccq->reg.bah = PF_FW_ARQBAH - mbx_start; + ccq->reg.bal = PF_FW_ARQBAL - mbx_start; ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M; ccq->reg.len_ena_mask = 
PF_FW_ARQLEN_ARQENABLE_M; ccq->reg.head_mask = PF_FW_ARQH_ARQH_M; @@ -129,7 +133,7 @@ free_reg_vals: */ static void idpf_reset_reg_init(struct idpf_adapter *adapter) { - adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT); + adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, PFGEN_RSTAT); adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M; } @@ -143,9 +147,32 @@ static void idpf_trigger_reset(struct idpf_adapter *adapter, { u32 reset_reg; - reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL)); + reset_reg = readl(idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL)); writel(reset_reg | PFGEN_CTRL_PFSWR, - idpf_get_reg_addr(adapter, PFGEN_CTRL)); + idpf_get_rstat_reg_addr(adapter, PFGEN_CTRL)); +} + +/** + * idpf_ptp_reg_init - Initialize required registers + * @adapter: Driver specific private structure + * + * Set the bits required for enabling shtime and cmd execution + */ +static void idpf_ptp_reg_init(const struct idpf_adapter *adapter) +{ + adapter->ptp->cmd.shtime_enable_mask = PF_GLTSYN_CMD_SYNC_SHTIME_EN_M; + adapter->ptp->cmd.exec_cmd_mask = PF_GLTSYN_CMD_SYNC_EXEC_CMD_M; +} + +/** + * idpf_idc_register - register for IDC callbacks + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. + */ +static int idpf_idc_register(struct idpf_adapter *adapter) +{ + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_PF); } /** @@ -159,6 +186,7 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter) adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init; adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init; adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset; + adapter->dev_ops.reg_ops.ptp_reg_init = idpf_ptp_reg_init; } /** @@ -168,4 +196,11 @@ static void idpf_reg_ops_init(struct idpf_adapter *adapter) void idpf_dev_ops_init(struct idpf_adapter *adapter) { idpf_reg_ops_init(adapter); + + adapter->dev_ops.idc_init = idpf_idc_register; + + resource_set_range(&adapter->dev_ops.static_reg_info[0], + PF_FW_BASE, IDPF_PF_MBX_REGION_SZ); + resource_set_range(&adapter->dev_ops.static_reg_info[1], + PFGEN_RTRIG, IDPF_PF_RSTAT_REGION_SZ); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 59b1a1a09996..0eb812ac19c2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -2,6 +2,8 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_ptp.h" +#include "idpf_virtchnl.h" /** * idpf_get_rxnfc - command to get RX flow classification rules @@ -12,26 +14,312 @@ * Returns Success if the command is supported. 
*/ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, - u32 __always_unused *rule_locs) + u32 *rule_locs) { + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_fsteer_fltr *f; struct idpf_vport *vport; + unsigned int cnt = 0; + int err = 0; idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = vport->num_rxq; - idpf_vport_ctrl_unlock(netdev); - - return 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = user_config->num_fsteer_fltrs; + cmd->data = idpf_fsteer_max_rules(vport); + break; + case ETHTOOL_GRXCLSRULE: + err = -EINVAL; + list_for_each_entry(f, &user_config->flow_steer_list, list) + if (f->loc == cmd->fs.location) { + cmd->fs.ring_cookie = f->q_index; + err = 0; + break; + } + break; + case ETHTOOL_GRXCLSRLALL: + cmd->data = idpf_fsteer_max_rules(vport); + list_for_each_entry(f, &user_config->flow_steer_list, list) { + if (cnt == cmd->rule_cnt) { + err = -EMSGSIZE; + break; + } + rule_locs[cnt] = f->loc; + cnt++; + } + if (!err) + cmd->rule_cnt = user_config->num_fsteer_fltrs; + break; default: break; } idpf_vport_ctrl_unlock(netdev); - return -EOPNOTSUPP; + return err; +} + +static void idpf_fsteer_fill_ipv4(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp) +{ + struct iphdr *iph; + + hdrs->proto_hdr[0].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_IPV4); + + iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_spec; + iph->saddr = fsp->h_u.tcp_ip4_spec.ip4src; + iph->daddr = fsp->h_u.tcp_ip4_spec.ip4dst; + + iph = (struct iphdr *)hdrs->proto_hdr[0].buffer_mask; + iph->saddr = fsp->m_u.tcp_ip4_spec.ip4src; + iph->daddr = fsp->m_u.tcp_ip4_spec.ip4dst; +} + +static void idpf_fsteer_fill_udp(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp, + bool v4) +{ + struct udphdr *udph, *udpm; + + hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_UDP); + + udph = (struct udphdr *)hdrs->proto_hdr[1].buffer_spec; + udpm = (struct udphdr *)hdrs->proto_hdr[1].buffer_mask; + + if (v4) { + udph->source = fsp->h_u.udp_ip4_spec.psrc; + udph->dest = fsp->h_u.udp_ip4_spec.pdst; + udpm->source = fsp->m_u.udp_ip4_spec.psrc; + udpm->dest = fsp->m_u.udp_ip4_spec.pdst; + } else { + udph->source = fsp->h_u.udp_ip6_spec.psrc; + udph->dest = fsp->h_u.udp_ip6_spec.pdst; + udpm->source = fsp->m_u.udp_ip6_spec.psrc; + udpm->dest = fsp->m_u.udp_ip6_spec.pdst; + } +} + +static void idpf_fsteer_fill_tcp(struct virtchnl2_proto_hdrs *hdrs, + struct ethtool_rx_flow_spec *fsp, + bool v4) +{ + struct tcphdr *tcph, *tcpm; + + hdrs->proto_hdr[1].hdr_type = cpu_to_le32(VIRTCHNL2_PROTO_HDR_TCP); + + tcph = (struct tcphdr *)hdrs->proto_hdr[1].buffer_spec; + tcpm = (struct tcphdr *)hdrs->proto_hdr[1].buffer_mask; + + if (v4) { + tcph->source = fsp->h_u.tcp_ip4_spec.psrc; + tcph->dest = fsp->h_u.tcp_ip4_spec.pdst; + tcpm->source = fsp->m_u.tcp_ip4_spec.psrc; + tcpm->dest = fsp->m_u.tcp_ip4_spec.pdst; + } else { + tcph->source = fsp->h_u.tcp_ip6_spec.psrc; + tcph->dest = fsp->h_u.tcp_ip6_spec.pdst; + tcpm->source = fsp->m_u.tcp_ip6_spec.psrc; + tcpm->dest = fsp->m_u.tcp_ip6_spec.pdst; + } +} + +/** + * idpf_add_flow_steer - add a Flow Steering filter + * @netdev: network interface device structure + * @cmd: command to add Flow Steering filter + * + * Return: 0 on success and negative values for failure + */ +static int idpf_add_flow_steer(struct 
net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct idpf_fsteer_fltr *fltr, *parent = NULL, *f; + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct virtchnl2_flow_rule_add_del *rule; + struct idpf_vport_config *vport_config; + struct virtchnl2_rule_action_set *acts; + struct virtchnl2_flow_rule_info *info; + struct virtchnl2_proto_hdrs *hdrs; + struct idpf_vport *vport; + u32 flow_type, q_index; + u16 num_rxq; + int err; + + vport = idpf_netdev_to_vport(netdev); + vport_config = vport->adapter->vport_config[np->vport_idx]; + user_config = &vport_config->user_config; + num_rxq = user_config->num_req_rx_qs; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + if (flow_type != fsp->flow_type) + return -EINVAL; + + if (!idpf_sideband_action_ena(vport, fsp) || + !idpf_sideband_flow_type_ena(vport, flow_type)) + return -EOPNOTSUPP; + + if (user_config->num_fsteer_fltrs > idpf_fsteer_max_rules(vport)) + return -ENOSPC; + + q_index = fsp->ring_cookie; + if (q_index >= num_rxq) + return -EINVAL; + + rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->vport_id = cpu_to_le32(vport->vport_id); + rule->count = cpu_to_le32(1); + info = &rule->rule_info[0]; + info->rule_id = cpu_to_le32(fsp->location); + + hdrs = &info->rule_cfg.proto_hdrs; + hdrs->tunnel_level = 0; + hdrs->count = cpu_to_le32(2); + + acts = &info->rule_cfg.action_set; + acts->count = cpu_to_le32(1); + acts->actions[0].action_type = cpu_to_le32(VIRTCHNL2_ACTION_QUEUE); + acts->actions[0].act_conf.q_id = cpu_to_le32(q_index); + + switch (flow_type) { + case UDP_V4_FLOW: + idpf_fsteer_fill_ipv4(hdrs, fsp); + idpf_fsteer_fill_udp(hdrs, fsp, true); + break; + case TCP_V4_FLOW: + idpf_fsteer_fill_ipv4(hdrs, fsp); + idpf_fsteer_fill_tcp(hdrs, fsp, true); + break; + default: + err = -EINVAL; + goto out; + } + + err = idpf_add_del_fsteer_filters(vport->adapter, rule, + VIRTCHNL2_OP_ADD_FLOW_RULE); + if (err) + goto out; + + if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { + err = -EIO; + goto out; + } + + fltr = kzalloc(sizeof(*fltr), GFP_KERNEL); + if (!fltr) { + err = -ENOMEM; + goto out; + } + + fltr->loc = fsp->location; + fltr->q_index = q_index; + list_for_each_entry(f, &user_config->flow_steer_list, list) { + if (f->loc >= fltr->loc) + break; + parent = f; + } + + parent ? 
list_add(&fltr->list, &parent->list) : + list_add(&fltr->list, &user_config->flow_steer_list); + + user_config->num_fsteer_fltrs++; + +out: + kfree(rule); + return err; +} + +/** + * idpf_del_flow_steer - delete a Flow Steering filter + * @netdev: network interface device structure + * @cmd: command to add Flow Steering filter + * + * Return: 0 on success and negative values for failure + */ +static int idpf_del_flow_steer(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct virtchnl2_flow_rule_add_del *rule; + struct idpf_vport_config *vport_config; + struct virtchnl2_flow_rule_info *info; + struct idpf_fsteer_fltr *f, *iter; + struct idpf_vport *vport; + int err; + + vport = idpf_netdev_to_vport(netdev); + vport_config = vport->adapter->vport_config[np->vport_idx]; + user_config = &vport_config->user_config; + + if (!idpf_sideband_action_ena(vport, fsp)) + return -EOPNOTSUPP; + + rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + rule->vport_id = cpu_to_le32(vport->vport_id); + rule->count = cpu_to_le32(1); + info = &rule->rule_info[0]; + info->rule_id = cpu_to_le32(fsp->location); + + err = idpf_add_del_fsteer_filters(vport->adapter, rule, + VIRTCHNL2_OP_DEL_FLOW_RULE); + if (err) + goto out; + + if (info->status != cpu_to_le32(VIRTCHNL2_FLOW_RULE_SUCCESS)) { + err = -EIO; + goto out; + } + + list_for_each_entry_safe(f, iter, + &user_config->flow_steer_list, list) { + if (f->loc == fsp->location) { + list_del(&f->list); + kfree(f); + user_config->num_fsteer_fltrs--; + goto out; + } + } + err = -EINVAL; + +out: + kfree(rule); + return err; +} + +static int idpf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + idpf_vport_ctrl_lock(netdev); + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = idpf_add_flow_steer(netdev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = idpf_del_flow_steer(netdev, cmd); + break; + default: + break; + } + + idpf_vport_ctrl_unlock(netdev); + return ret; } /** @@ -46,7 +334,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; @@ -65,7 +353,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; @@ -1089,12 +1377,14 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * __idpf_set_q_coalesce - set ITR values for specific queue * @ec: ethtool structure from user to update ITR settings + * @q_coal: per queue coalesce settings * @qv: queue vector for which itr values has to be set * @is_rxq: is queue type rx * * Returns 0 on success, negative otherwise. 
*/ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, + struct idpf_q_coalesce *q_coal, struct idpf_q_vector *qv, bool is_rxq) { u32 use_adaptive_coalesce, coalesce_usecs; @@ -1138,20 +1428,25 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, if (is_rxq) { qv->rx_itr_value = coalesce_usecs; + q_coal->rx_coalesce_usecs = coalesce_usecs; if (use_adaptive_coalesce) { qv->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal->rx_intr_mode = IDPF_ITR_DYNAMIC; } else { qv->rx_intr_mode = !IDPF_ITR_DYNAMIC; - idpf_vport_intr_write_itr(qv, qv->rx_itr_value, - false); + q_coal->rx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, coalesce_usecs, false); } } else { qv->tx_itr_value = coalesce_usecs; + q_coal->tx_coalesce_usecs = coalesce_usecs; if (use_adaptive_coalesce) { qv->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal->tx_intr_mode = IDPF_ITR_DYNAMIC; } else { qv->tx_intr_mode = !IDPF_ITR_DYNAMIC; - idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true); + q_coal->tx_intr_mode = !IDPF_ITR_DYNAMIC; + idpf_vport_intr_write_itr(qv, coalesce_usecs, true); } } @@ -1164,6 +1459,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, /** * idpf_set_q_coalesce - set ITR values for specific queue * @vport: vport associated to the queue that need updating + * @q_coal: per queue coalesce settings * @ec: coalesce settings to program the device with * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index * @is_rxq: is queue type rx @@ -1171,6 +1467,7 @@ static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, * Return 0 on success, and negative on failure */ static int idpf_set_q_coalesce(const struct idpf_vport *vport, + struct idpf_q_coalesce *q_coal, const struct ethtool_coalesce *ec, int q_num, bool is_rxq) { @@ -1179,7 +1476,7 @@ static int idpf_set_q_coalesce(const struct idpf_vport *vport, qv = is_rxq ? 
idpf_find_rxq_vec(vport, q_num) : idpf_find_txq_vec(vport, q_num); - if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) + if (qv && __idpf_set_q_coalesce(ec, q_coal, qv, is_rxq)) return -EINVAL; return 0; @@ -1200,9 +1497,13 @@ static int idpf_set_coalesce(struct net_device *netdev, struct netlink_ext_ack *extack) { struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_q_coalesce *q_coal; struct idpf_vport *vport; int i, err = 0; + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); @@ -1210,13 +1511,15 @@ static int idpf_set_coalesce(struct net_device *netdev, goto unlock_mutex; for (i = 0; i < vport->num_txq; i++) { - err = idpf_set_q_coalesce(vport, ec, i, false); + q_coal = &user_config->q_coalesce[i]; + err = idpf_set_q_coalesce(vport, q_coal, ec, i, false); if (err) goto unlock_mutex; } for (i = 0; i < vport->num_rxq; i++) { - err = idpf_set_q_coalesce(vport, ec, i, true); + q_coal = &user_config->q_coalesce[i]; + err = idpf_set_q_coalesce(vport, q_coal, ec, i, true); if (err) goto unlock_mutex; } @@ -1238,20 +1541,25 @@ unlock_mutex: static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num, struct ethtool_coalesce *ec) { + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport_user_config_data *user_config; + struct idpf_q_coalesce *q_coal; struct idpf_vport *vport; int err; idpf_vport_ctrl_lock(netdev); vport = idpf_netdev_to_vport(netdev); + user_config = &np->adapter->vport_config[np->vport_idx]->user_config; + q_coal = &user_config->q_coalesce[q_num]; - err = idpf_set_q_coalesce(vport, ec, q_num, false); + err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, false); if (err) { idpf_vport_ctrl_unlock(netdev); return err; } - err = idpf_set_q_coalesce(vport, ec, q_num, true); + err = idpf_set_q_coalesce(vport, q_coal, ec, q_num, true); idpf_vport_ctrl_unlock(netdev); @@ -1312,6 +1620,71 @@ static int idpf_get_link_ksettings(struct net_device *netdev, return 0; } +/** + * idpf_get_timestamp_filters - Get the supported timestamping mode + * @vport: Virtual port structure + * @info: ethtool timestamping info structure + * + * Get the Tx/Rx timestamp filters. + */ +static void idpf_get_timestamp_filters(const struct idpf_vport *vport, + struct kernel_ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->tx_types = BIT(HWTSTAMP_TX_OFF); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + if (!vport->tx_tstamp_caps || + vport->adapter->ptp->tx_tstamp_access == IDPF_PTP_NONE) + return; + + info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE; + + info->tx_types |= BIT(HWTSTAMP_TX_ON); +} + +/** + * idpf_get_ts_info - Get device PHC association + * @netdev: network interface device structure + * @info: ethtool timestamping info structure + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int idpf_get_ts_info(struct net_device *netdev, + struct kernel_ethtool_ts_info *info) +{ + struct idpf_netdev_priv *np = netdev_priv(netdev); + struct idpf_vport *vport; + int err = 0; + + if (!mutex_trylock(&np->adapter->vport_ctrl_lock)) + return -EBUSY; + + vport = idpf_netdev_to_vport(netdev); + + if (!vport->adapter->ptp) { + err = -EOPNOTSUPP; + goto unlock; + } + + if (idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP) && + vport->adapter->ptp->clock) { + info->phc_index = ptp_clock_index(vport->adapter->ptp->clock); + idpf_get_timestamp_filters(vport, info); + } else { + pci_dbg(vport->adapter->pdev, "PTP clock not detected\n"); + err = ethtool_op_get_ts_info(netdev, info); + } + +unlock: + mutex_unlock(&np->adapter->vport_ctrl_lock); + + return err; +} + static const struct ethtool_ops idpf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE, @@ -1328,6 +1701,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_sset_count = idpf_get_sset_count, .get_channels = idpf_get_channels, .get_rxnfc = idpf_get_rxnfc, + .set_rxnfc = idpf_set_rxnfc, .get_rxfh_key_size = idpf_get_rxfh_key_size, .get_rxfh_indir_size = idpf_get_rxfh_indir_size, .get_rxfh = idpf_get_rxfh, @@ -1336,6 +1710,7 @@ static const struct ethtool_ops idpf_ethtool_ops = { .get_ringparam = idpf_get_ringparam, .set_ringparam = idpf_set_ringparam, .get_link_ksettings = idpf_get_link_ksettings, + .get_ts_info = idpf_get_ts_info, }; /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_idc.c b/drivers/net/ethernet/intel/idpf/idpf_idc.c new file mode 100644 index 000000000000..4d2905103215 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_idc.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2025 Intel Corporation */ + +#include <linux/export.h> + +#include "idpf.h" +#include "idpf_virtchnl.h" + +static DEFINE_IDA(idpf_idc_ida); + +#define IDPF_IDC_MAX_ADEV_NAME_LEN 15 + +/** + * idpf_idc_init - Called to initialize IDC + * @adapter: driver private data structure + * + * Return: 0 on success or cap not enabled, error code on failure. + */ +int idpf_idc_init(struct idpf_adapter *adapter) +{ + int err; + + if (!idpf_is_rdma_cap_ena(adapter) || + !adapter->dev_ops.idc_init) + return 0; + + err = adapter->dev_ops.idc_init(adapter); + if (err) + dev_err(&adapter->pdev->dev, "failed to initialize idc: %d\n", + err); + + return err; +} + +/** + * idpf_vport_adev_release - function to be mapped to aux dev's release op + * @dev: pointer to device to free + */ +static void idpf_vport_adev_release(struct device *dev) +{ + struct iidc_rdma_vport_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_rdma_vport_auxiliary_dev, adev.dev); + kfree(iadev); + iadev = NULL; +} + +/** + * idpf_plug_vport_aux_dev - allocate and register a vport Auxiliary device + * @cdev_info: IDC core device info pointer + * @vdev_info: IDC vport device info pointer + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_plug_vport_aux_dev(struct iidc_rdma_core_dev_info *cdev_info, + struct iidc_rdma_vport_dev_info *vdev_info) +{ + struct iidc_rdma_vport_auxiliary_dev *iadev; + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + vdev_info->adev = &iadev->adev; + iadev->vdev_info = vdev_info; + + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); + if (ret < 0) { + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); + goto err_ida_alloc; + } + adev->id = ret; + adev->dev.release = idpf_vport_adev_release; + adev->dev.parent = &cdev_info->pdev->dev; + sprintf(name, "%04x.rdma.vdev", cdev_info->pdev->vendor); + adev->name = name; + + ret = auxiliary_device_init(adev); + if (ret) + goto err_aux_dev_init; + + ret = auxiliary_device_add(adev); + if (ret) + goto err_aux_dev_add; + + return 0; + +err_aux_dev_add: + auxiliary_device_uninit(adev); +err_aux_dev_init: + ida_free(&idpf_idc_ida, adev->id); +err_ida_alloc: + vdev_info->adev = NULL; + kfree(iadev); + + return ret; +} + +/** + * idpf_idc_init_aux_vport_dev - initialize vport Auxiliary Device(s) + * @vport: virtual port data struct + * + * Return: 0 on success or error code on failure. + */ +static int idpf_idc_init_aux_vport_dev(struct idpf_vport *vport) +{ + struct idpf_adapter *adapter = vport->adapter; + struct iidc_rdma_vport_dev_info *vdev_info; + struct iidc_rdma_core_dev_info *cdev_info; + struct virtchnl2_create_vport *vport_msg; + int err; + + vport_msg = (struct virtchnl2_create_vport *) + adapter->vport_params_recvd[vport->idx]; + + if (!(le16_to_cpu(vport_msg->vport_flags) & VIRTCHNL2_VPORT_ENABLE_RDMA)) + return 0; + + vport->vdev_info = kzalloc(sizeof(*vdev_info), GFP_KERNEL); + if (!vport->vdev_info) + return -ENOMEM; + + cdev_info = vport->adapter->cdev_info; + + vdev_info = vport->vdev_info; + vdev_info->vport_id = vport->vport_id; + vdev_info->netdev = vport->netdev; + vdev_info->core_adev = cdev_info->adev; + + err = idpf_plug_vport_aux_dev(cdev_info, vdev_info); + if (err) { + vport->vdev_info = NULL; + kfree(vdev_info); + return err; + } + + return 0; +} + +/** + * idpf_idc_vdev_mtu_event - Function to handle IDC vport mtu change events + * @vdev_info: IDC vport device info pointer + * @event_type: type of event to pass to handler + */ +void idpf_idc_vdev_mtu_event(struct iidc_rdma_vport_dev_info *vdev_info, + enum iidc_rdma_event_type event_type) +{ + struct iidc_rdma_vport_auxiliary_drv *iadrv; + struct iidc_rdma_event event = { }; + struct auxiliary_device *adev; + + if (!vdev_info) + /* RDMA is not enabled */ + return; + + set_bit(event_type, event.type); + + device_lock(&vdev_info->adev->dev); + adev = vdev_info->adev; + if (!adev || !adev->dev.driver) + goto unlock; + iadrv = container_of(adev->dev.driver, + struct iidc_rdma_vport_auxiliary_drv, + adrv.driver); + if (iadrv->event_handler) + iadrv->event_handler(vdev_info, &event); +unlock: + device_unlock(&vdev_info->adev->dev); +} + +/** + * idpf_core_adev_release - function to be mapped to aux dev's release op + * @dev: pointer to device to free + */ +static void idpf_core_adev_release(struct device *dev) +{ + struct iidc_rdma_core_auxiliary_dev *iadev; + + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, adev.dev); + kfree(iadev); + iadev = NULL; +} + +/** + * idpf_plug_core_aux_dev - allocate and register an Auxiliary device + * @cdev_info: IDC core device info pointer + * + * Return: 0 on 
success or error code on failure. + */ +static int idpf_plug_core_aux_dev(struct iidc_rdma_core_dev_info *cdev_info) +{ + struct iidc_rdma_core_auxiliary_dev *iadev; + char name[IDPF_IDC_MAX_ADEV_NAME_LEN]; + struct auxiliary_device *adev; + int ret; + + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); + if (!iadev) + return -ENOMEM; + + adev = &iadev->adev; + cdev_info->adev = adev; + iadev->cdev_info = cdev_info; + + ret = ida_alloc(&idpf_idc_ida, GFP_KERNEL); + if (ret < 0) { + pr_err("failed to allocate unique device ID for Auxiliary driver\n"); + goto err_ida_alloc; + } + adev->id = ret; + adev->dev.release = idpf_core_adev_release; + adev->dev.parent = &cdev_info->pdev->dev; + sprintf(name, "%04x.rdma.core", cdev_info->pdev->vendor); + adev->name = name; + + ret = auxiliary_device_init(adev); + if (ret) + goto err_aux_dev_init; + + ret = auxiliary_device_add(adev); + if (ret) + goto err_aux_dev_add; + + return 0; + +err_aux_dev_add: + auxiliary_device_uninit(adev); +err_aux_dev_init: + ida_free(&idpf_idc_ida, adev->id); +err_ida_alloc: + cdev_info->adev = NULL; + kfree(iadev); + + return ret; +} + +/** + * idpf_unplug_aux_dev - unregister and free an Auxiliary device + * @adev: auxiliary device struct + */ +static void idpf_unplug_aux_dev(struct auxiliary_device *adev) +{ + if (!adev) + return; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); + + ida_free(&idpf_idc_ida, adev->id); +} + +/** + * idpf_idc_issue_reset_event - Function to handle reset IDC event + * @cdev_info: IDC core device info pointer + */ +void idpf_idc_issue_reset_event(struct iidc_rdma_core_dev_info *cdev_info) +{ + enum iidc_rdma_event_type event_type = IIDC_RDMA_EVENT_WARN_RESET; + struct iidc_rdma_core_auxiliary_drv *iadrv; + struct iidc_rdma_event event = { }; + struct auxiliary_device *adev; + + if (!cdev_info) + /* RDMA is not enabled */ + return; + + set_bit(event_type, event.type); + + device_lock(&cdev_info->adev->dev); + + adev = cdev_info->adev; + if (!adev || !adev->dev.driver) + goto unlock; + + iadrv = container_of(adev->dev.driver, + struct iidc_rdma_core_auxiliary_drv, + adrv.driver); + if (iadrv->event_handler) + iadrv->event_handler(cdev_info, &event); +unlock: + device_unlock(&cdev_info->adev->dev); +} + +/** + * idpf_idc_vport_dev_up - called when CORE is ready for vport aux devs + * @adapter: private data struct + * + * Return: 0 on success or error code on failure. 
+ */ +static int idpf_idc_vport_dev_up(struct idpf_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_alloc_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (!vport) + continue; + + if (!vport->vdev_info) + err = idpf_idc_init_aux_vport_dev(vport); + else + err = idpf_plug_vport_aux_dev(vport->adapter->cdev_info, + vport->vdev_info); + } + + return err; +} + +/** + * idpf_idc_vport_dev_down - called CORE is leaving vport aux dev support state + * @adapter: private data struct + */ +static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_alloc_vports; i++) { + struct idpf_vport *vport = adapter->vports[i]; + + if (!vport) + continue; + + idpf_unplug_aux_dev(vport->vdev_info->adev); + vport->vdev_info->adev = NULL; + } +} + +/** + * idpf_idc_vport_dev_ctrl - Called by an Auxiliary Driver + * @cdev_info: IDC core device info pointer + * @up: RDMA core driver status + * + * This callback function is accessed by an Auxiliary Driver to indicate + * whether core driver is ready to support vport driver load or if vport + * drivers need to be taken down. + * + * Return: 0 on success or error code on failure. + */ +int idpf_idc_vport_dev_ctrl(struct iidc_rdma_core_dev_info *cdev_info, bool up) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + + if (up) + return idpf_idc_vport_dev_up(adapter); + + idpf_idc_vport_dev_down(adapter); + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_vport_dev_ctrl); + +/** + * idpf_idc_request_reset - Called by an Auxiliary Driver + * @cdev_info: IDC core device info pointer + * @reset_type: function, core or other + * + * This callback function is accessed by an Auxiliary Driver to request a reset + * on the Auxiliary Device. + * + * Return: 0 on success or error code on failure. + */ +int idpf_idc_request_reset(struct iidc_rdma_core_dev_info *cdev_info, + enum iidc_rdma_reset_type __always_unused reset_type) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + + if (!idpf_is_reset_in_prog(adapter)) { + set_bit(IDPF_HR_FUNC_RESET, adapter->flags); + queue_delayed_work(adapter->vc_event_wq, + &adapter->vc_event_task, + msecs_to_jiffies(10)); + } + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_request_reset); + +/** + * idpf_idc_init_msix_data - initialize MSIX data for the cdev_info structure + * @adapter: driver private data structure + */ +static void +idpf_idc_init_msix_data(struct idpf_adapter *adapter) +{ + struct iidc_rdma_core_dev_info *cdev_info; + struct iidc_rdma_priv_dev_info *privd; + + if (!adapter->rdma_msix_entries) + return; + + cdev_info = adapter->cdev_info; + privd = cdev_info->iidc_priv; + + privd->msix_entries = adapter->rdma_msix_entries; + privd->msix_count = adapter->num_rdma_msix_entries; +} + +/** + * idpf_idc_init_aux_core_dev - initialize Auxiliary Device(s) + * @adapter: driver private data structure + * @ftype: PF or VF + * + * Return: 0 on success or error code on failure. 
+ */ +int idpf_idc_init_aux_core_dev(struct idpf_adapter *adapter, + enum iidc_function_type ftype) +{ + struct iidc_rdma_core_dev_info *cdev_info; + struct iidc_rdma_priv_dev_info *privd; + int err, i; + + adapter->cdev_info = kzalloc(sizeof(*cdev_info), GFP_KERNEL); + if (!adapter->cdev_info) + return -ENOMEM; + cdev_info = adapter->cdev_info; + + privd = kzalloc(sizeof(*privd), GFP_KERNEL); + if (!privd) { + err = -ENOMEM; + goto err_privd_alloc; + } + + cdev_info->iidc_priv = privd; + cdev_info->pdev = adapter->pdev; + cdev_info->rdma_protocol = IIDC_RDMA_PROTOCOL_ROCEV2; + privd->ftype = ftype; + + privd->mapped_mem_regions = + kcalloc(adapter->hw.num_lan_regs, + sizeof(struct iidc_rdma_lan_mapped_mem_region), + GFP_KERNEL); + if (!privd->mapped_mem_regions) { + err = -ENOMEM; + goto err_plug_aux_dev; + } + + privd->num_memory_regions = cpu_to_le16(adapter->hw.num_lan_regs); + for (i = 0; i < adapter->hw.num_lan_regs; i++) { + privd->mapped_mem_regions[i].region_addr = + adapter->hw.lan_regs[i].vaddr; + privd->mapped_mem_regions[i].size = + cpu_to_le64(adapter->hw.lan_regs[i].addr_len); + privd->mapped_mem_regions[i].start_offset = + cpu_to_le64(adapter->hw.lan_regs[i].addr_start); + } + + idpf_idc_init_msix_data(adapter); + + err = idpf_plug_core_aux_dev(cdev_info); + if (err) + goto err_free_mem_regions; + + return 0; + +err_free_mem_regions: + kfree(privd->mapped_mem_regions); + privd->mapped_mem_regions = NULL; +err_plug_aux_dev: + kfree(privd); +err_privd_alloc: + kfree(cdev_info); + adapter->cdev_info = NULL; + + return err; +} + +/** + * idpf_idc_deinit_core_aux_device - de-initialize Auxiliary Device(s) + * @cdev_info: IDC core device info pointer + */ +void idpf_idc_deinit_core_aux_device(struct iidc_rdma_core_dev_info *cdev_info) +{ + struct iidc_rdma_priv_dev_info *privd; + + if (!cdev_info) + return; + + idpf_unplug_aux_dev(cdev_info->adev); + + privd = cdev_info->iidc_priv; + kfree(privd->mapped_mem_regions); + kfree(privd); + kfree(cdev_info); +} + +/** + * idpf_idc_deinit_vport_aux_device - de-initialize Auxiliary Device(s) + * @vdev_info: IDC vport device info pointer + */ +void idpf_idc_deinit_vport_aux_device(struct iidc_rdma_vport_dev_info *vdev_info) +{ + if (!vdev_info) + return; + + idpf_unplug_aux_dev(vdev_info->adev); + + kfree(vdev_info); +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h index 24edb8a6ec2e..cc9aa2b6a14a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h @@ -53,6 +53,10 @@ #define PF_FW_ATQH_ATQH_M GENMASK(9, 0) #define PF_FW_ATQT (PF_FW_BASE + 0x24) +/* Timesync registers */ +#define PF_GLTSYN_CMD_SYNC_EXEC_CMD_M GENMASK(1, 0) +#define PF_GLTSYN_CMD_SYNC_SHTIME_EN_M BIT(2) + /* Interrupts */ #define PF_GLINT_BASE 0x08900000 #define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000)) diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index 8c7f8ef8f1a1..7492d1713243 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -282,7 +282,18 @@ struct idpf_flex_tx_tso_ctx_qw { u8 flex; }; -struct idpf_flex_tx_ctx_desc { +union idpf_flex_tx_ctx_desc { + /* DTYPE = IDPF_TX_DESC_DTYPE_CTX (0x01) */ + struct { + __le64 qw0; +#define IDPF_TX_CTX_L2TAG2_M GENMASK_ULL(47, 32) + __le64 qw1; +#define IDPF_TX_CTX_DTYPE_M GENMASK_ULL(3, 0) +#define IDPF_TX_CTX_CMD_M GENMASK_ULL(15, 4) +#define 
IDPF_TX_CTX_TSYN_REG_M GENMASK_ULL(47, 30) +#define IDPF_TX_CTX_MSS_M GENMASK_ULL(50, 63) + } tsyn; + /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */ struct { struct idpf_flex_tx_tso_ctx_qw qw0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index aa755dedb41d..2c2a3e85d693 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" +#include "idpf_ptp.h" static const struct net_device_ops idpf_netdev_ops; @@ -87,6 +88,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter) idpf_deinit_vector_stack(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; + kfree(adapter->rdma_msix_entries); + adapter->rdma_msix_entries = NULL; } /** @@ -144,22 +147,6 @@ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) } /** - * idpf_set_mb_vec_id - Set vector index for mailbox - * @adapter: adapter structure to access the vector chunks - * - * The first vector id in the requested vector chunks from the CP is for - * the mailbox - */ -static void idpf_set_mb_vec_id(struct idpf_adapter *adapter) -{ - if (adapter->req_vec_chunks) - adapter->mb_vector.v_idx = - le16_to_cpu(adapter->caps.mailbox_vector_id); - else - adapter->mb_vector.v_idx = 0; -} - -/** * idpf_mb_intr_init - Initialize the mailbox interrupt * @adapter: adapter structure to store the mailbox vector */ @@ -314,13 +301,33 @@ rel_lock: */ int idpf_intr_req(struct idpf_adapter *adapter) { + u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0; u16 default_vports = idpf_get_default_vports(adapter); int num_q_vecs, total_vecs, num_vec_ids; - int min_vectors, v_actual, err; + int min_vectors, actual_vecs, err; unsigned int vector; u16 *vecids; + int i; total_vecs = idpf_get_reserved_vecs(adapter); + num_lan_vecs = total_vecs; + if (idpf_is_rdma_cap_ena(adapter)) { + num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter); + min_rdma_vecs = IDPF_MIN_RDMA_VEC; + + if (!num_rdma_vecs) { + /* If idpf_get_reserved_rdma_vecs is 0, vectors are + * pulled from the LAN pool. + */ + num_rdma_vecs = min_rdma_vecs; + } else if (num_rdma_vecs < min_rdma_vecs) { + dev_err(&adapter->pdev->dev, + "Not enough vectors reserved for RDMA (min: %u, current: %u)\n", + min_rdma_vecs, num_rdma_vecs); + return -EINVAL; + } + } + num_q_vecs = total_vecs - IDPF_MBX_Q_VEC; err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs); @@ -331,52 +338,76 @@ int idpf_intr_req(struct idpf_adapter *adapter) return -EAGAIN; } - min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; - v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors, - total_vecs, PCI_IRQ_MSIX); - if (v_actual < min_vectors) { - dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n", - v_actual); - err = -EAGAIN; + min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports; + min_vectors = min_lan_vecs + min_rdma_vecs; + actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors, + total_vecs, PCI_IRQ_MSIX); + if (actual_vecs < 0) { + dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n", + min_vectors); + err = actual_vecs; goto send_dealloc_vecs; } - adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry), - GFP_KERNEL); + if (idpf_is_rdma_cap_ena(adapter)) { + if (actual_vecs < total_vecs) { + dev_warn(&adapter->pdev->dev, + "Warning: %d vectors requested, only %d available. 
Defaulting to minimum (%d) for RDMA and remaining for LAN.\n", + total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC); + num_rdma_vecs = IDPF_MIN_RDMA_VEC; + } + + adapter->rdma_msix_entries = kcalloc(num_rdma_vecs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!adapter->rdma_msix_entries) { + err = -ENOMEM; + goto free_irq; + } + } + num_lan_vecs = actual_vecs - num_rdma_vecs; + adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry), + GFP_KERNEL); if (!adapter->msix_entries) { err = -ENOMEM; - goto free_irq; + goto free_rdma_msix; } - idpf_set_mb_vec_id(adapter); + adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id); - vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL); + vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL); if (!vecids) { err = -ENOMEM; goto free_msix; } - num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs, + num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs, &adapter->req_vec_chunks->vchunks); - if (num_vec_ids < v_actual) { + if (num_vec_ids < actual_vecs) { err = -EINVAL; goto free_vecids; } - for (vector = 0; vector < v_actual; vector++) { + for (vector = 0; vector < num_lan_vecs; vector++) { adapter->msix_entries[vector].entry = vecids[vector]; adapter->msix_entries[vector].vector = pci_irq_vector(adapter->pdev, vector); } + for (i = 0; i < num_rdma_vecs; vector++, i++) { + adapter->rdma_msix_entries[i].entry = vecids[vector]; + adapter->rdma_msix_entries[i].vector = + pci_irq_vector(adapter->pdev, vector); + } - adapter->num_req_msix = total_vecs; - adapter->num_msix_entries = v_actual; /* 'num_avail_msix' is used to distribute excess vectors to the vports * after considering the minimum vectors required per each default * vport */ - adapter->num_avail_msix = v_actual - min_vectors; + adapter->num_avail_msix = num_lan_vecs - min_lan_vecs; + adapter->num_msix_entries = num_lan_vecs; + if (idpf_is_rdma_cap_ena(adapter)) + adapter->num_rdma_msix_entries = num_rdma_vecs; /* Fill MSIX vector lifo stack with vector indexes */ err = idpf_init_vector_stack(adapter); @@ -398,6 +429,9 @@ free_vecids: free_msix: kfree(adapter->msix_entries); adapter->msix_entries = NULL; +free_rdma_msix: + kfree(adapter->rdma_msix_entries); + adapter->rdma_msix_entries = NULL; free_irq: pci_free_irq_vectors(adapter->pdev); send_dealloc_vecs: @@ -703,8 +737,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; struct idpf_vport_config *vport_config; + netdev_features_t other_offloads = 0; + netdev_features_t csum_offloads = 0; + netdev_features_t tso_offloads = 0; netdev_features_t dflt_features; - netdev_features_t offloads = 0; struct idpf_netdev_priv *np; struct net_device *netdev; u16 idx = vport->idx; @@ -721,6 +757,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->vport = vport; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); vport->netdev = netdev; return idpf_init_mac_addr(vport, netdev); @@ -738,6 +775,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) np->adapter = adapter; np->vport_idx = vport->idx; np->vport_id = vport->vport_id; + np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter); spin_lock_init(&np->stats_lock); @@ -766,53 +804,36 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) dflt_features |= NETIF_F_RXHASH; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4)) - dflt_features |= 
NETIF_F_IP_CSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6)) - dflt_features |= NETIF_F_IPV6_CSUM; + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_FLOW_STEER) && + idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER)) + dflt_features |= NETIF_F_NTUPLE; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4)) + csum_offloads |= NETIF_F_IP_CSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6)) + csum_offloads |= NETIF_F_IPV6_CSUM; if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM)) - dflt_features |= NETIF_F_RXCSUM; - if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM)) - dflt_features |= NETIF_F_SCTP_CRC; + csum_offloads |= NETIF_F_RXCSUM; + if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_SCTP_CSUM)) + csum_offloads |= NETIF_F_SCTP_CRC; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP)) - dflt_features |= NETIF_F_TSO; + tso_offloads |= NETIF_F_TSO; if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP)) - dflt_features |= NETIF_F_TSO6; + tso_offloads |= NETIF_F_TSO6; if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_UDP | VIRTCHNL2_CAP_SEG_IPV6_UDP)) - dflt_features |= NETIF_F_GSO_UDP_L4; + tso_offloads |= NETIF_F_GSO_UDP_L4; if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC)) - offloads |= NETIF_F_GRO_HW; - /* advertise to stack only if offloads for encapsulated packets is - * supported - */ - if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS, - VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) { - offloads |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | - NETIF_F_GSO_PARTIAL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_GSO_IPXIP4 | - NETIF_F_GSO_IPXIP6 | - 0; - - if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS, - IDPF_CAP_TUNNEL_TX_CSUM)) - netdev->gso_partial_features |= - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - offloads |= NETIF_F_TSO_MANGLEID; - } + other_offloads |= NETIF_F_GRO_HW; if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK)) - offloads |= NETIF_F_LOOPBACK; + other_offloads |= NETIF_F_LOOPBACK; - netdev->features |= dflt_features; - netdev->hw_features |= dflt_features | offloads; - netdev->hw_enc_features |= dflt_features | offloads; + netdev->features |= dflt_features | csum_offloads | tso_offloads; + netdev->hw_features |= netdev->features | other_offloads; + netdev->vlan_features |= netdev->features | other_offloads; + netdev->hw_enc_features |= dflt_features | other_offloads; idpf_set_ethtool_ops(netdev); netif_set_affinity_auto(netdev); SET_NETDEV_DEV(netdev, &adapter->pdev->dev); @@ -1004,6 +1025,8 @@ static void idpf_vport_dealloc(struct idpf_vport *vport) struct idpf_adapter *adapter = vport->adapter; unsigned int i = vport->idx; + idpf_idc_deinit_vport_aux_device(vport->vdev_info); + idpf_deinit_mac_addr(vport); idpf_vport_stop(vport); @@ -1111,8 +1134,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, if (!vport) return vport; + num_max_q = max(max_q->max_txq, max_q->max_rxq); if (!adapter->vport_config[idx]) { struct idpf_vport_config *vport_config; + struct idpf_q_coalesce *q_coal; vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL); if (!vport_config) { @@ -1121,6 +1146,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, return NULL; } + q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL); + if 
(!q_coal) { + kfree(vport_config); + kfree(vport); + + return NULL; + } + for (int i = 0; i < num_max_q; i++) { + q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF; + q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC; + q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF; + } + vport_config->user_config.q_coalesce = q_coal; + adapter->vport_config[idx] = vport_config; } @@ -1130,13 +1170,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, vport->default_vport = adapter->num_alloc_vports < idpf_get_default_vports(adapter); - num_max_q = max(max_q->max_txq, max_q->max_rxq); vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL); - if (!vport->q_vector_idxs) { - kfree(vport); + if (!vport->q_vector_idxs) + goto free_vport; - return NULL; - } idpf_vport_init(vport, max_q); /* This alloc is done separate from the LUT because it's not strictly @@ -1146,11 +1183,9 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, */ rss_data = &adapter->vport_config[idx]->user_config.rss_data; rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL); - if (!rss_data->rss_key) { - kfree(vport); + if (!rss_data->rss_key) + goto free_vector_idxs; - return NULL; - } /* Initialize default rss key */ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size); @@ -1163,6 +1198,13 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter, adapter->next_vport = idpf_get_free_slot(adapter); return vport; + +free_vector_idxs: + kfree(vport->q_vector_idxs); +free_vport: + kfree(vport); + + return NULL; } /** @@ -1510,6 +1552,7 @@ void idpf_init_task(struct work_struct *work) spin_lock_init(&vport_config->mac_filter_list_lock); INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); + INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list); err = idpf_check_supported_desc_ids(vport); if (err) { @@ -1767,6 +1810,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) { bool is_reset = idpf_is_reset_detected(adapter); + idpf_idc_issue_reset_event(adapter->cdev_info); + idpf_set_vport_state(adapter); idpf_vc_core_deinit(adapter); if (!is_reset) @@ -1814,6 +1859,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) unlock_mutex: mutex_unlock(&adapter->vport_ctrl_lock); + /* Wait until all vports are created to init RDMA CORE AUX */ + if (!err) + err = idpf_idc_init(adapter); + return err; } @@ -1830,11 +1879,19 @@ void idpf_vc_event_task(struct work_struct *work) if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) return; - if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) || - test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) { - set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); - idpf_init_hard_reset(adapter); - } + if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags)) + goto func_reset; + + if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) + goto drv_load; + + return; + +func_reset: + idpf_vc_xn_shutdown(adapter->vcxn_mngr); +drv_load: + set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags); + idpf_init_hard_reset(adapter); } /** @@ -1889,6 +1946,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, idpf_vport_calc_num_q_desc(new_vport); break; case IDPF_SR_MTU_CHANGE: + idpf_idc_vdev_mtu_event(vport->vdev_info, + IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE); + break; case IDPF_SR_RSC_CHANGE: break; default: @@ -1933,9 +1993,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, if (current_state == __IDPF_VPORT_UP) err = 
idpf_vport_open(vport); - kfree(new_vport); - - return err; + goto free_vport; err_reset: idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, @@ -1948,6 +2006,10 @@ err_open: free_vport: kfree(new_vport); + if (reset_cause == IDPF_SR_MTU_CHANGE) + idpf_idc_vdev_mtu_event(vport->vdev_info, + IIDC_RDMA_EVENT_AFTER_MTU_CHANGE); + return err; } @@ -2219,8 +2281,8 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_adapter *adapter = vport->adapter; + struct idpf_netdev_priv *np = netdev_priv(netdev); + u16 max_tx_hdr_size = np->max_tx_hdr_size; size_t len; /* No point in doing any of this if neither checksum nor GSO are @@ -2243,7 +2305,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, goto unsupported; len = skb_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; if (!skb->encapsulation) @@ -2256,7 +2318,7 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb, /* IPLEN can support at most 127 dwords */ len = skb_inner_network_header_len(skb); - if (unlikely(len > idpf_get_max_tx_hdr_size(adapter))) + if (unlikely(len > max_tx_hdr_size)) goto unsupported; /* No need to validate L4LEN as TCP is the only protocol with a @@ -2335,8 +2397,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) struct idpf_adapter *adapter = hw->back; size_t sz = ALIGN(size, 4096); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, - &mem->pa, GFP_KERNEL); + /* The control queue resources are freed under a spinlock, contiguous + * pages will avoid IOMMU remapping and the use vmap (and vunmap in + * dma_free_*() path. 
+ */ + mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = sz; return mem->va; @@ -2351,13 +2417,67 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) { struct idpf_adapter *adapter = hw->back; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, mem->pa); + dma_free_attrs(&adapter->pdev->dev, mem->size, + mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = 0; mem->va = NULL; mem->pa = 0; } +static int idpf_hwtstamp_set(struct net_device *netdev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct idpf_vport *vport; + int err; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return -EOPNOTSUPP; + } + + err = idpf_ptp_set_timestamp_mode(vport, config); + + idpf_vport_ctrl_unlock(netdev); + + return err; +} + +static int idpf_hwtstamp_get(struct net_device *netdev, + struct kernel_hwtstamp_config *config) +{ + struct idpf_vport *vport; + + idpf_vport_ctrl_lock(netdev); + vport = idpf_netdev_to_vport(netdev); + + if (!vport->link_up) { + idpf_vport_ctrl_unlock(netdev); + return -EPERM; + } + + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport) && + !idpf_ptp_is_vport_rx_tstamp_ena(vport)) { + idpf_vport_ctrl_unlock(netdev); + return 0; + } + + *config = vport->tstamp_config; + + idpf_vport_ctrl_unlock(netdev); + + return 0; +} + static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, @@ -2370,4 +2490,6 @@ static const struct net_device_ops idpf_netdev_ops = { .ndo_get_stats64 = idpf_get_stats64, .ndo_set_features = idpf_set_features, .ndo_tx_timeout = idpf_tx_timeout, + .ndo_hwtstamp_get = idpf_hwtstamp_get, + .ndo_hwtstamp_set = idpf_hwtstamp_set, }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index b6c515d14cbf..dfe9126f1f4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -62,6 +62,7 @@ destroy_wqs: destroy_workqueue(adapter->vc_event_wq); for (i = 0; i < adapter->max_vports; i++) { + kfree(adapter->vport_config[i]->user_config.q_coalesce); kfree(adapter->vport_config[i]); adapter->vport_config[i] = NULL; } @@ -87,7 +88,12 @@ destroy_wqs: */ static void idpf_shutdown(struct pci_dev *pdev) { - idpf_remove(pdev); + struct idpf_adapter *adapter = pci_get_drvdata(pdev); + + cancel_delayed_work_sync(&adapter->serv_task); + cancel_delayed_work_sync(&adapter->vc_event_task); + idpf_vc_core_deinit(adapter); + idpf_deinit_dflt_mbx(adapter); if (system_state == SYSTEM_POWER_OFF) pci_set_power_state(pdev, PCI_D3hot); @@ -101,15 +107,37 @@ static void idpf_shutdown(struct pci_dev *pdev) */ static int idpf_cfg_hw(struct idpf_adapter *adapter) { + resource_size_t res_start, mbx_start, rstat_start; struct pci_dev *pdev = adapter->pdev; struct idpf_hw *hw = &adapter->hw; + struct device *dev = &pdev->dev; + long len; + + res_start = pci_resource_start(pdev, 0); - hw->hw_addr = pcim_iomap_table(pdev)[0]; - if (!hw->hw_addr) { - pci_err(pdev, "failed to allocate PCI iomap table\n"); + /* Map mailbox space for virtchnl communication */ + mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start; + len = resource_size(&adapter->dev_ops.static_reg_info[0]); + hw->mbx.vaddr = 
devm_ioremap(dev, mbx_start, len); + if (!hw->mbx.vaddr) { + pci_err(pdev, "failed to allocate BAR0 mbx region\n"); return -ENOMEM; } + hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start; + hw->mbx.addr_len = len; + + /* Map rstat space for resets */ + rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start; + len = resource_size(&adapter->dev_ops.static_reg_info[1]); + hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len); + if (!hw->rstat.vaddr) { + pci_err(pdev, "failed to allocate BAR0 rstat region\n"); + + return -ENOMEM; + } + hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start; + hw->rstat.addr_len = len; hw->back = adapter; @@ -156,13 +184,17 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_free; - err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + err = pcim_request_region(pdev, 0, pci_name(pdev)); if (err) { - pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err)); + pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err)); goto err_free; } + err = pci_enable_ptm(pdev, NULL); + if (err) + pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n"); + /* set up for high or low dma */ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (err) { @@ -194,9 +226,8 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_serv_wq_alloc; } - adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", - WQ_UNBOUND | WQ_MEM_RECLAIM, 0, - dev_driver_string(dev), + adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI, + 0, dev_driver_string(dev), dev_name(dev)); if (!adapter->mbx_wq) { dev_err(dev, "Failed to allocate mailbox workqueue\n"); diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h index b21a04fccf0f..2aaabdc02dd2 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_mem.h +++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h @@ -12,9 +12,9 @@ struct idpf_dma_mem { size_t size; }; -#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) -#define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) -#define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define idpf_mbx_wr32(a, reg, value) writel((value), ((a)->mbx.vaddr + (reg))) +#define idpf_mbx_rd32(a, reg) readl((a)->mbx.vaddr + (reg)) +#define idpf_mbx_wr64(a, reg, value) writeq((value), ((a)->mbx.vaddr + (reg))) +#define idpf_mbx_rd64(a, reg) readq((a)->mbx.vaddr + (reg)) #endif /* _IDPF_MEM_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_ptp.c new file mode 100644 index 000000000000..ee21f2ff0cad --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.c @@ -0,0 +1,1009 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" + +/** + * idpf_ptp_get_access - Determine the access type of the PTP features + * @adapter: Driver specific private structure + * @direct: Capability that indicates the direct access + * @mailbox: Capability that indicates the mailbox access + * + * Return: the type of supported access for the PTP feature. 
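+ *
+ * When both capability bits are set, direct (BAR register) access takes
+ * precedence over the mailbox path, since it avoids a virtchnl round trip.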
+ */ +static enum idpf_ptp_access +idpf_ptp_get_access(const struct idpf_adapter *adapter, u32 direct, u32 mailbox) +{ + if (adapter->ptp->caps & direct) + return IDPF_PTP_DIRECT; + else if (adapter->ptp->caps & mailbox) + return IDPF_PTP_MAILBOX; + else + return IDPF_PTP_NONE; +} + +/** + * idpf_ptp_get_features_access - Determine the access type of PTP features + * @adapter: Driver specific private structure + * + * Fulfill the adapter structure with type of the supported PTP features + * access. + */ +void idpf_ptp_get_features_access(const struct idpf_adapter *adapter) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 direct, mailbox; + + /* Get the device clock time */ + direct = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB; + ptp->get_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Get the cross timestamp */ + direct = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME; + mailbox = VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB; + ptp->get_cross_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Set the device clock time */ + direct = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; + mailbox = VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME; + ptp->set_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Adjust the device clock time */ + direct = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK; + mailbox = VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB; + ptp->adj_dev_clk_time_access = idpf_ptp_get_access(adapter, + direct, + mailbox); + + /* Tx timestamping */ + direct = VIRTCHNL2_CAP_PTP_TX_TSTAMPS; + mailbox = VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB; + ptp->tx_tstamp_access = idpf_ptp_get_access(adapter, + direct, + mailbox); +} + +/** + * idpf_ptp_enable_shtime - Enable shadow time and execute a command + * @adapter: Driver specific private structure + */ +static void idpf_ptp_enable_shtime(struct idpf_adapter *adapter) +{ + u32 shtime_enable, exec_cmd; + + /* Get offsets */ + shtime_enable = adapter->ptp->cmd.shtime_enable_mask; + exec_cmd = adapter->ptp->cmd.exec_cmd_mask; + + /* Set the shtime en and the sync field */ + writel(shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); + writel(exec_cmd | shtime_enable, adapter->ptp->dev_clk_regs.cmd_sync); +} + +/** + * idpf_ptp_read_src_clk_reg_direct - Read directly the main timer value + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given. + * + * Return: the device clock time. + */ +static u64 idpf_ptp_read_src_clk_reg_direct(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts) +{ + struct idpf_ptp *ptp = adapter->ptp; + u32 hi, lo; + + spin_lock(&ptp->read_dev_clk_lock); + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + idpf_ptp_enable_shtime(adapter); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + spin_unlock(&ptp->read_dev_clk_lock); + + return ((u64)hi << 32) | lo; +} + +/** + * idpf_ptp_read_src_clk_reg_mailbox - Read the main timer value through mailbox + * @adapter: Driver specific private structure + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored when NULL is given. + * @src_clk: Returned main timer value in nanoseconds unit + * + * Return: 0 on success, -errno otherwise. 
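+ *
+ * Note that the pre/post system timestamps bracket the whole mailbox
+ * exchange here, so the captured window is much wider than with direct
+ * register access and the resulting device/system correlation is
+ * correspondingly coarser.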
+ */ +static int idpf_ptp_read_src_clk_reg_mailbox(struct idpf_adapter *adapter, + struct ptp_system_timestamp *sts, + u64 *src_clk) +{ + struct idpf_ptp_dev_timers clk_time; + int err; + + /* Read the system timestamp pre PHC read */ + ptp_read_system_prets(sts); + + err = idpf_ptp_get_dev_clk_time(adapter, &clk_time); + if (err) + return err; + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + *src_clk = clk_time.dev_clk_time_ns; + + return 0; +} + +/** + * idpf_ptp_read_src_clk_reg - Read the main timer value + * @adapter: Driver specific private structure + * @src_clk: Returned main timer value in nanoseconds unit + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: the device clock time on success, -errno otherwise. + */ +static int idpf_ptp_read_src_clk_reg(struct idpf_adapter *adapter, u64 *src_clk, + struct ptp_system_timestamp *sts) +{ + switch (adapter->ptp->get_dev_clk_time_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_MAILBOX: + return idpf_ptp_read_src_clk_reg_mailbox(adapter, sts, src_clk); + case IDPF_PTP_DIRECT: + *src_clk = idpf_ptp_read_src_clk_reg_direct(adapter, sts); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) || IS_ENABLED(CONFIG_X86) +/** + * idpf_ptp_get_sync_device_time_direct - Get the cross time stamp values + * directly + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value + * @sys_time: 64bit system time value + */ +static void idpf_ptp_get_sync_device_time_direct(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + u32 dev_time_lo, dev_time_hi, sys_time_lo, sys_time_hi; + struct idpf_ptp *ptp = adapter->ptp; + + spin_lock(&ptp->read_dev_clk_lock); + + idpf_ptp_enable_shtime(adapter); + + dev_time_lo = readl(ptp->dev_clk_regs.dev_clk_ns_l); + dev_time_hi = readl(ptp->dev_clk_regs.dev_clk_ns_h); + + sys_time_lo = readl(ptp->dev_clk_regs.sys_time_ns_l); + sys_time_hi = readl(ptp->dev_clk_regs.sys_time_ns_h); + + spin_unlock(&ptp->read_dev_clk_lock); + + *dev_time = (u64)dev_time_hi << 32 | dev_time_lo; + *sys_time = (u64)sys_time_hi << 32 | sys_time_lo; +} + +/** + * idpf_ptp_get_sync_device_time_mailbox - Get the cross time stamp values + * through mailbox + * @adapter: Driver specific private structure + * @dev_time: 64bit main timer value expressed in nanoseconds + * @sys_time: 64bit system time value expressed in nanoseconds + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_get_sync_device_time_mailbox(struct idpf_adapter *adapter, + u64 *dev_time, u64 *sys_time) +{ + struct idpf_ptp_dev_timers cross_time; + int err; + + err = idpf_ptp_get_cross_time(adapter, &cross_time); + if (err) + return err; + + *dev_time = cross_time.dev_clk_time_ns; + *sys_time = cross_time.sys_time_ns; + + return err; +} + +/** + * idpf_ptp_get_sync_device_time - Get the cross time stamp info + * @device: Current device time + * @system: System counter value read synchronously with device time + * @ctx: Context provided by timekeeping code + * + * The device and the system clocks time read simultaneously. + * + * Return: 0 on success, -errno otherwise. 
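+ *
+ * The system value is reported in nanoseconds of the platform cross
+ * timestamp counter (ART on x86, the architected counter on arm64),
+ * which is why @system is filled with use_nsecs set and the matching
+ * clocksource ID.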
+ */ +static int idpf_ptp_get_sync_device_time(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + struct idpf_adapter *adapter = ctx; + u64 ns_time_dev, ns_time_sys; + int err; + + switch (adapter->ptp->get_cross_tstamp_access) { + case IDPF_PTP_NONE: + return -EOPNOTSUPP; + case IDPF_PTP_DIRECT: + idpf_ptp_get_sync_device_time_direct(adapter, &ns_time_dev, + &ns_time_sys); + break; + case IDPF_PTP_MAILBOX: + err = idpf_ptp_get_sync_device_time_mailbox(adapter, + &ns_time_dev, + &ns_time_sys); + if (err) + return err; + break; + default: + return -EOPNOTSUPP; + } + + *device = ns_to_ktime(ns_time_dev); + + system->cs_id = IS_ENABLED(CONFIG_X86) ? CSID_X86_ART + : CSID_ARM_ARCH_COUNTER; + system->cycles = ns_time_sys; + system->use_nsecs = true; + + return 0; +} + +/** + * idpf_ptp_get_crosststamp - Capture a device cross timestamp + * @info: the driver's PTP info structure + * @cts: The memory to fill the cross timestamp info + * + * Capture a cross timestamp between the system time and the device PTP hardware + * clock. + * + * Return: cross timestamp value on success, -errno on failure. + */ +static int idpf_ptp_get_crosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + + return get_device_system_crosststamp(idpf_ptp_get_sync_device_time, + adapter, NULL, cts); +} +#endif /* CONFIG_ARM_ARCH_TIMER || CONFIG_X86 */ + +/** + * idpf_ptp_gettimex64 - Get the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure to hold the current time value + * @sts: Optional parameter for holding a pair of system timestamps from + * the system clock. Will be ignored if NULL is given. + * + * Return: the device clock value in ns, after converting it into a timespec + * struct on success, -errno otherwise. + */ +static int idpf_ptp_gettimex64(struct ptp_clock_info *info, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + u64 time_ns; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &time_ns, sts); + if (err) + return -EACCES; + + *ts = ns_to_timespec64(time_ns); + + return 0; +} + +/** + * idpf_ptp_update_phctime_rxq_grp - Update the cached PHC time for a given Rx + * queue group. + * @grp: receive queue group in which Rx timestamp is enabled + * @split: Indicates whether the queue model is split or single queue + * @systime: Cached system time + */ +static void +idpf_ptp_update_phctime_rxq_grp(const struct idpf_rxq_group *grp, bool split, + u64 systime) +{ + struct idpf_rx_queue *rxq; + u16 i; + + if (!split) { + for (i = 0; i < grp->singleq.num_rxq; i++) { + rxq = grp->singleq.rxqs[i]; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } else { + for (i = 0; i < grp->splitq.num_rxq_sets; i++) { + rxq = &grp->splitq.rxq_sets[i]->rxq; + if (rxq) + WRITE_ONCE(rxq->cached_phc_time, systime); + } + } +} + +/** + * idpf_ptp_update_cached_phctime - Update the cached PHC time values + * @adapter: Driver specific private structure + * + * This function updates the system time values which are cached in the adapter + * structure and the Rx queues. + * + * This function must be called periodically to ensure that the cached value + * is never more than 2 seconds old. + * + * Return: 0 on success, -errno otherwise. 
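+ *
+ * The 2 second bound follows from the timestamp width: the hardware
+ * reports only the low 32 bits of nanoseconds, which wrap every
+ * 2^32 ns (~4.295 s). Extending such a value against the cached PHC time
+ * is unambiguous only while the cached value is within half a wrap
+ * (~2.147 s) of the capture time.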
+ */ +static int idpf_ptp_update_cached_phctime(struct idpf_adapter *adapter) +{ + u64 systime; + int err; + + err = idpf_ptp_read_src_clk_reg(adapter, &systime, NULL); + if (err) + return -EACCES; + + /* Update the cached PHC time stored in the adapter structure. + * These values are used to extend Tx timestamp values to 64 bit + * expected by the stack. + */ + WRITE_ONCE(adapter->ptp->cached_phc_time, systime); + WRITE_ONCE(adapter->ptp->cached_phc_jiffies, jiffies); + + idpf_for_each_vport(adapter, vport) { + bool split; + + if (!vport || !vport->rxq_grps) + continue; + + split = idpf_is_queue_model_split(vport->rxq_model); + + for (u16 i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + + idpf_ptp_update_phctime_rxq_grp(grp, split, systime); + } + } + + return 0; +} + +/** + * idpf_ptp_settime64 - Set the time of the clock + * @info: the driver's PTP info structure + * @ts: timespec64 structure that holds the new time value + * + * Set the device clock to the user input value. The conversion from timespec + * to ns happens in the write function. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_settime64(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + int err; + u64 ns; + + access = adapter->ptp->set_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + ns = timespec64_to_ns(ts); + + err = idpf_ptp_set_dev_clk_time(adapter, ns); + if (err) { + pci_err(adapter->pdev, "Failed to set the time, err: %pe\n", + ERR_PTR(err)); + return err; + } + + err = idpf_ptp_update_cached_phctime(adapter); + if (err) + pci_warn(adapter->pdev, + "Unable to immediately update cached PHC time\n"); + + return 0; +} + +/** + * idpf_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment + * @info: the driver's PTP info structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta) +{ + struct timespec64 now, then; + int err; + + err = idpf_ptp_gettimex64(info, &now, NULL); + if (err) + return err; + + then = ns_to_timespec64(delta); + now = timespec64_add(now, then); + + return idpf_ptp_settime64(info, &now); +} + +/** + * idpf_ptp_adjtime - Adjust the time of the clock by the indicated delta + * @info: the driver's PTP info structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + int err; + + access = adapter->ptp->adj_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + /* Hardware only supports atomic adjustments using signed 32-bit + * integers. For any adjustment outside this range, perform + * a non-atomic get->adjust->set flow. 
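+ *
+ * With nanosecond deltas this limits the atomic path to roughly
+ * +/- 2.147 s (S32_MAX nanoseconds); a larger correction, e.g. a
+ * requested 5 s offset, takes the non-atomic path below.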
+ */ + if (delta > S32_MAX || delta < S32_MIN) + return idpf_ptp_adjtime_nonatomic(info, delta); + + err = idpf_ptp_adj_dev_clk_time(adapter, delta); + if (err) { + pci_err(adapter->pdev, "Failed to adjust the clock with delta %lld err: %pe\n", + delta, ERR_PTR(err)); + return err; + } + + err = idpf_ptp_update_cached_phctime(adapter); + if (err) + pci_warn(adapter->pdev, + "Unable to immediately update cached PHC time\n"); + + return 0; +} + +/** + * idpf_ptp_adjfine - Adjust clock increment rate + * @info: the driver's PTP info structure + * @scaled_ppm: Parts per million with 16-bit fractional field + * + * Adjust the frequency of the clock by the indicated scaled ppm from the + * base frequency. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + enum idpf_ptp_access access; + u64 incval, diff; + int err; + + access = adapter->ptp->adj_dev_clk_time_access; + if (access != IDPF_PTP_MAILBOX) + return -EOPNOTSUPP; + + incval = adapter->ptp->base_incval; + + diff = adjust_by_scaled_ppm(incval, scaled_ppm); + err = idpf_ptp_adj_dev_clk_fine(adapter, diff); + if (err) + pci_err(adapter->pdev, "Failed to adjust clock increment rate for scaled ppm %ld %pe\n", + scaled_ppm, ERR_PTR(err)); + + return 0; +} + +/** + * idpf_ptp_verify_pin - Verify if pin supports requested pin function + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Return: EOPNOTSUPP as not supported yet. + */ +static int idpf_ptp_verify_pin(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return -EOPNOTSUPP; +} + +/** + * idpf_ptp_gpio_enable - Enable/disable ancillary features of PHC + * @info: the driver's PTP info structure + * @rq: The requested feature to change + * @on: Enable/disable flag + * + * Return: EOPNOTSUPP as not supported yet. + */ +static int idpf_ptp_gpio_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * idpf_ptp_tstamp_extend_32b_to_64b - Convert a 32b nanoseconds Tx or Rx + * timestamp value to 64b. + * @cached_phc_time: recently cached copy of PHC time + * @in_timestamp: Ingress/egress 32b nanoseconds timestamp value + * + * Hardware captures timestamps which contain only 32 bits of nominal + * nanoseconds, as opposed to the 64bit timestamps that the stack expects. + * + * Return: Tx timestamp value extended to 64 bits based on cached PHC time. + */ +u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp) +{ + u32 delta, phc_time_lo; + u64 ns; + + /* Extract the lower 32 bits of the PHC time */ + phc_time_lo = (u32)cached_phc_time; + + /* Calculate the delta between the lower 32bits of the cached PHC + * time and the in_timestamp value. + */ + delta = in_timestamp - phc_time_lo; + + if (delta > U32_MAX / 2) { + /* Reverse the delta calculation here */ + delta = phc_time_lo - in_timestamp; + ns = cached_phc_time - delta; + } else { + ns = cached_phc_time + delta; + } + + return ns; +} + +/** + * idpf_ptp_extend_ts - Convert a 40b timestamp to 64b nanoseconds + * @vport: Virtual port structure + * @in_tstamp: Ingress/egress timestamp value + * + * It is assumed that the caller verifies the timestamp is valid prior to + * calling this function. + * + * Extract the 32bit nominal nanoseconds and extend them. 
Use the cached PHC + * time stored in the device private PTP structure as the basis for timestamp + * extension. + * + * Return: Tx timestamp value extended to 64 bits. + */ +u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp) +{ + struct idpf_ptp *ptp = vport->adapter->ptp; + unsigned long discard_time; + + discard_time = ptp->cached_phc_jiffies + 2 * HZ; + + if (time_is_before_jiffies(discard_time)) + return 0; + + return idpf_ptp_tstamp_extend_32b_to_64b(ptp->cached_phc_time, + lower_32_bits(in_tstamp)); +} + +/** + * idpf_ptp_request_ts - Request an available Tx timestamp index + * @tx_q: Transmit queue on which the Tx timestamp is requested + * @skb: The SKB to associate with this timestamp request + * @idx: Index of the Tx timestamp latch + * + * Request tx timestamp index negotiated during PTP init that will be set into + * Tx descriptor. + * + * Return: 0 and the index that can be provided to Tx descriptor on success, + * -errno otherwise. + */ +int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + u32 *idx) +{ + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp; + struct list_head *head; + + /* Get the index from the free latches list */ + spin_lock(&tx_q->cached_tstamp_caps->latches_lock); + + head = &tx_q->cached_tstamp_caps->latches_free; + if (list_empty(head)) { + spin_unlock(&tx_q->cached_tstamp_caps->latches_lock); + return -ENOBUFS; + } + + ptp_tx_tstamp = list_first_entry(head, struct idpf_ptp_tx_tstamp, + list_member); + list_del(&ptp_tx_tstamp->list_member); + + ptp_tx_tstamp->skb = skb_get(skb); + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + /* Move the element to the used latches list */ + list_add(&ptp_tx_tstamp->list_member, + &tx_q->cached_tstamp_caps->latches_in_use); + spin_unlock(&tx_q->cached_tstamp_caps->latches_lock); + + *idx = ptp_tx_tstamp->idx; + + return 0; +} + +/** + * idpf_ptp_set_rx_tstamp - Enable or disable Rx timestamping + * @vport: Virtual port structure + * @rx_filter: Receive timestamp filter + */ +static void idpf_ptp_set_rx_tstamp(struct idpf_vport *vport, int rx_filter) +{ + bool enable = true, splitq; + + splitq = idpf_is_queue_model_split(vport->rxq_model); + + if (rx_filter == HWTSTAMP_FILTER_NONE) { + enable = false; + vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + } else { + vport->tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + for (u16 i = 0; i < vport->num_rxq_grp; i++) { + struct idpf_rxq_group *grp = &vport->rxq_grps[i]; + struct idpf_rx_queue *rx_queue; + u16 j, num_rxq; + + if (splitq) + num_rxq = grp->splitq.num_rxq_sets; + else + num_rxq = grp->singleq.num_rxq; + + for (j = 0; j < num_rxq; j++) { + if (splitq) + rx_queue = &grp->splitq.rxq_sets[j]->rxq; + else + rx_queue = grp->singleq.rxqs[j]; + + if (enable) + idpf_queue_set(PTP, rx_queue); + else + idpf_queue_clear(PTP, rx_queue); + } + } +} + +/** + * idpf_ptp_set_timestamp_mode - Setup driver for requested timestamp mode + * @vport: Virtual port structure + * @config: Hwtstamp settings requested or saved + * + * Return: 0 on success, -errno otherwise. 
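+ *
+ * The Rx filter is coarse: any value other than HWTSTAMP_FILTER_NONE is
+ * upgraded to HWTSTAMP_FILTER_ALL, as per-protocol Rx filtering is not
+ * supported, and the effective settings are written back through @config.
+ * A minimal, illustrative caller mirroring the ndo_hwtstamp_set path:
+ *
+ *	struct kernel_hwtstamp_config cfg = {
+ *		.tx_type   = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
+ *	};
+ *
+ *	err = idpf_ptp_set_timestamp_mode(vport, &cfg);
+ *
+ * On success, cfg.rx_filter reads back as HWTSTAMP_FILTER_ALL.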
+ */ +int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config) +{ + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + break; + case HWTSTAMP_TX_ON: + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + vport->tstamp_config.tx_type = config->tx_type; + idpf_ptp_set_rx_tstamp(vport, config->rx_filter); + *config = vport->tstamp_config; + + return 0; +} + +/** + * idpf_tstamp_task - Delayed task to handle Tx tstamps + * @work: work_struct handle + */ +void idpf_tstamp_task(struct work_struct *work) +{ + struct idpf_vport *vport; + + vport = container_of(work, struct idpf_vport, tstamp_task); + + idpf_ptp_get_tx_tstamp(vport); +} + +/** + * idpf_ptp_do_aux_work - Do PTP periodic work + * @info: Driver's PTP info structure + * + * Return: Number of jiffies to periodic work. + */ +static long idpf_ptp_do_aux_work(struct ptp_clock_info *info) +{ + struct idpf_adapter *adapter = idpf_ptp_info_to_adapter(info); + + idpf_ptp_update_cached_phctime(adapter); + + return msecs_to_jiffies(500); +} + +/** + * idpf_ptp_set_caps - Set PTP capabilities + * @adapter: Driver specific private structure + * + * This function sets the PTP functions. + */ +static void idpf_ptp_set_caps(const struct idpf_adapter *adapter) +{ + struct ptp_clock_info *info = &adapter->ptp->info; + + snprintf(info->name, sizeof(info->name), "%s-%s-clk", + KBUILD_MODNAME, pci_name(adapter->pdev)); + + info->owner = THIS_MODULE; + info->max_adj = adapter->ptp->max_adj; + info->gettimex64 = idpf_ptp_gettimex64; + info->settime64 = idpf_ptp_settime64; + info->adjfine = idpf_ptp_adjfine; + info->adjtime = idpf_ptp_adjtime; + info->verify = idpf_ptp_verify_pin; + info->enable = idpf_ptp_gpio_enable; + info->do_aux_work = idpf_ptp_do_aux_work; +#if IS_ENABLED(CONFIG_ARM_ARCH_TIMER) + info->getcrosststamp = idpf_ptp_get_crosststamp; +#elif IS_ENABLED(CONFIG_X86) + if (pcie_ptm_enabled(adapter->pdev) && + boot_cpu_has(X86_FEATURE_ART) && + boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) + info->getcrosststamp = idpf_ptp_get_crosststamp; +#endif /* CONFIG_ARM_ARCH_TIMER */ +} + +/** + * idpf_ptp_create_clock - Create PTP clock device for userspace + * @adapter: Driver specific private structure + * + * This function creates a new PTP clock device. + * + * Return: 0 on success, -errno otherwise. + */ +static int idpf_ptp_create_clock(const struct idpf_adapter *adapter) +{ + struct ptp_clock *clock; + + idpf_ptp_set_caps(adapter); + + /* Attempt to register the clock before enabling the hardware. */ + clock = ptp_clock_register(&adapter->ptp->info, + &adapter->pdev->dev); + if (IS_ERR(clock)) { + pci_err(adapter->pdev, "PTP clock creation failed: %pe\n", + clock); + return PTR_ERR(clock); + } + + adapter->ptp->clock = clock; + + return 0; +} + +/** + * idpf_ptp_release_vport_tstamp - Release the Tx timestamps trakcers for a + * given vport. + * @vport: Virtual port structure + * + * Remove the queues and delete lists that tracks Tx timestamp entries for a + * given vport. 
+ */ +static void idpf_ptp_release_vport_tstamp(struct idpf_vport *vport) +{ + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp; + struct list_head *head; + + cancel_work_sync(&vport->tstamp_task); + + /* Remove list with free latches */ + spin_lock_bh(&vport->tx_tstamp_caps->latches_lock); + + head = &vport->tx_tstamp_caps->latches_free; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + /* Remove list with latches in use */ + head = &vport->tx_tstamp_caps->latches_in_use; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + spin_unlock_bh(&vport->tx_tstamp_caps->latches_lock); + + kfree(vport->tx_tstamp_caps); + vport->tx_tstamp_caps = NULL; +} + +/** + * idpf_ptp_release_tstamp - Release the Tx timestamps trackers + * @adapter: Driver specific private structure + * + * Remove the queues and delete lists that tracks Tx timestamp entries. + */ +static void idpf_ptp_release_tstamp(struct idpf_adapter *adapter) +{ + idpf_for_each_vport(adapter, vport) { + if (!idpf_ptp_is_vport_tx_tstamp_ena(vport)) + continue; + + idpf_ptp_release_vport_tstamp(vport); + } +} + +/** + * idpf_ptp_get_txq_tstamp_capability - Verify the timestamping capability + * for a given tx queue. + * @txq: Transmit queue + * + * Since performing timestamp flows requires reading the device clock value and + * the support in the Control Plane, the function checks both factors and + * summarizes the support for the timestamping. + * + * Return: true if the timestamping is supported, false otherwise. + */ +bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq) +{ + if (!txq || !txq->cached_tstamp_caps) + return false; + else if (txq->cached_tstamp_caps->access) + return true; + else + return false; +} + +/** + * idpf_ptp_init - Initialize PTP hardware clock support + * @adapter: Driver specific private structure + * + * Set up the device for interacting with the PTP hardware clock for all + * functions. Function will allocate and register a ptp_clock with the + * PTP_1588_CLOCK infrastructure. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_init(struct idpf_adapter *adapter) +{ + struct timespec64 ts; + int err; + + if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PTP)) { + pci_dbg(adapter->pdev, "PTP capability is not detected\n"); + return -EOPNOTSUPP; + } + + adapter->ptp = kzalloc(sizeof(*adapter->ptp), GFP_KERNEL); + if (!adapter->ptp) + return -ENOMEM; + + /* add a back pointer to adapter */ + adapter->ptp->adapter = adapter; + + if (adapter->dev_ops.reg_ops.ptp_reg_init) + adapter->dev_ops.reg_ops.ptp_reg_init(adapter); + + err = idpf_ptp_get_caps(adapter); + if (err) { + pci_err(adapter->pdev, "Failed to get PTP caps err %d\n", err); + goto free_ptp; + } + + err = idpf_ptp_create_clock(adapter); + if (err) + goto free_ptp; + + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_schedule_worker(adapter->ptp->clock, 0); + + /* Write the default increment time value if the clock adjustments + * are enabled. 
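+ *
+ * base_incval is assumed here to be the nominal per-tick increment
+ * reported by the device during PTP capability negotiation; writing it
+ * restores the clock to its nominal frequency before any adjfine
+ * corrections are applied.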
+ */ + if (adapter->ptp->adj_dev_clk_time_access != IDPF_PTP_NONE) { + err = idpf_ptp_adj_dev_clk_fine(adapter, + adapter->ptp->base_incval); + if (err) + goto remove_clock; + } + + /* Write the initial time value if the set time operation is enabled */ + if (adapter->ptp->set_dev_clk_time_access != IDPF_PTP_NONE) { + ts = ktime_to_timespec64(ktime_get_real()); + err = idpf_ptp_settime64(&adapter->ptp->info, &ts); + if (err) + goto remove_clock; + } + + spin_lock_init(&adapter->ptp->read_dev_clk_lock); + + pci_dbg(adapter->pdev, "PTP init successful\n"); + + return 0; + +remove_clock: + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_cancel_worker_sync(adapter->ptp->clock); + + ptp_clock_unregister(adapter->ptp->clock); + adapter->ptp->clock = NULL; + +free_ptp: + kfree(adapter->ptp); + adapter->ptp = NULL; + + return err; +} + +/** + * idpf_ptp_release - Clear PTP hardware clock support + * @adapter: Driver specific private structure + */ +void idpf_ptp_release(struct idpf_adapter *adapter) +{ + struct idpf_ptp *ptp = adapter->ptp; + + if (!ptp) + return; + + if (ptp->tx_tstamp_access != IDPF_PTP_NONE && + ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + idpf_ptp_release_tstamp(adapter); + + if (ptp->clock) { + if (adapter->ptp->get_dev_clk_time_access != IDPF_PTP_NONE) + ptp_cancel_worker_sync(adapter->ptp->clock); + + ptp_clock_unregister(ptp->clock); + } + + kfree(ptp); + adapter->ptp = NULL; +} diff --git a/drivers/net/ethernet/intel/idpf/idpf_ptp.h b/drivers/net/ethernet/intel/idpf/idpf_ptp.h new file mode 100644 index 000000000000..785da03e4cf5 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_ptp.h @@ -0,0 +1,379 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef _IDPF_PTP_H +#define _IDPF_PTP_H + +#include <linux/ptp_clock_kernel.h> + +/** + * struct idpf_ptp_cmd - PTP command masks + * @exec_cmd_mask: mask to trigger command execution + * @shtime_enable_mask: mask to enable shadow time + */ +struct idpf_ptp_cmd { + u32 exec_cmd_mask; + u32 shtime_enable_mask; +}; + +/* struct idpf_ptp_dev_clk_regs - PTP device registers + * @dev_clk_ns_l: low part of the device clock register + * @dev_clk_ns_h: high part of the device clock register + * @phy_clk_ns_l: low part of the PHY clock register + * @phy_clk_ns_h: high part of the PHY clock register + * @sys_time_ns_l: low part of the system time register + * @sys_time_ns_h: high part of the system time register + * @incval_l: low part of the increment value register + * @incval_h: high part of the increment value register + * @shadj_l: low part of the shadow adjust register + * @shadj_h: high part of the shadow adjust register + * @phy_incval_l: low part of the PHY increment value register + * @phy_incval_h: high part of the PHY increment value register + * @phy_shadj_l: low part of the PHY shadow adjust register + * @phy_shadj_h: high part of the PHY shadow adjust register + * @cmd: PTP command register + * @phy_cmd: PHY command register + * @cmd_sync: PTP command synchronization register + */ +struct idpf_ptp_dev_clk_regs { + /* Main clock */ + void __iomem *dev_clk_ns_l; + void __iomem *dev_clk_ns_h; + + /* PHY timer */ + void __iomem *phy_clk_ns_l; + void __iomem *phy_clk_ns_h; + + /* System time */ + void __iomem *sys_time_ns_l; + void __iomem *sys_time_ns_h; + + /* Main timer adjustments */ + void __iomem *incval_l; + void __iomem *incval_h; + void __iomem *shadj_l; + void __iomem *shadj_h; + + /* PHY timer adjustments */ + void __iomem *phy_incval_l; + 
void __iomem *phy_incval_h; + void __iomem *phy_shadj_l; + void __iomem *phy_shadj_h; + + /* Command */ + void __iomem *cmd; + void __iomem *phy_cmd; + void __iomem *cmd_sync; +}; + +/** + * enum idpf_ptp_access - the type of access to PTP operations + * @IDPF_PTP_NONE: no access + * @IDPF_PTP_DIRECT: direct access through BAR registers + * @IDPF_PTP_MAILBOX: access through mailbox messages + */ +enum idpf_ptp_access { + IDPF_PTP_NONE = 0, + IDPF_PTP_DIRECT, + IDPF_PTP_MAILBOX, +}; + +/** + * struct idpf_ptp_secondary_mbx - PTP secondary mailbox + * @peer_mbx_q_id: PTP mailbox queue ID + * @peer_id: Peer ID for PTP Device Control daemon + * @valid: indicates whether secondary mailblox is supported by the Control + * Plane + */ +struct idpf_ptp_secondary_mbx { + u16 peer_mbx_q_id; + u16 peer_id; + bool valid:1; +}; + +/** + * enum idpf_ptp_tx_tstamp_state - Tx timestamp states + * @IDPF_PTP_FREE: Tx timestamp index free to use + * @IDPF_PTP_REQUEST: Tx timestamp index set to the Tx descriptor + * @IDPF_PTP_READ_VALUE: Tx timestamp value ready to be read + */ +enum idpf_ptp_tx_tstamp_state { + IDPF_PTP_FREE, + IDPF_PTP_REQUEST, + IDPF_PTP_READ_VALUE, +}; + +/** + * struct idpf_ptp_tx_tstamp_status - Parameters to track Tx timestamp + * @skb: the pointer to the SKB that received the completion tag + * @state: the state of the Tx timestamp + */ +struct idpf_ptp_tx_tstamp_status { + struct sk_buff *skb; + enum idpf_ptp_tx_tstamp_state state; +}; + +/** + * struct idpf_ptp_tx_tstamp - Parameters for Tx timestamping + * @list_member: the list member structure + * @tx_latch_reg_offset_l: Tx tstamp latch low register offset + * @tx_latch_reg_offset_h: Tx tstamp latch high register offset + * @skb: the pointer to the SKB for this timestamp request + * @tstamp: the Tx tstamp value + * @idx: the index of the Tx tstamp + */ +struct idpf_ptp_tx_tstamp { + struct list_head list_member; + u32 tx_latch_reg_offset_l; + u32 tx_latch_reg_offset_h; + struct sk_buff *skb; + u64 tstamp; + u32 idx; +}; + +/** + * struct idpf_ptp_vport_tx_tstamp_caps - Tx timestamp capabilities + * @vport_id: the vport id + * @num_entries: the number of negotiated Tx timestamp entries + * @tstamp_ns_lo_bit: first bit for nanosecond part of the timestamp + * @latches_lock: the lock to the lists of free/used timestamp indexes + * @status_lock: the lock to the status tracker + * @access: indicates an access to Tx timestamp + * @latches_free: the list of the free Tx timestamps latches + * @latches_in_use: the list of the used Tx timestamps latches + * @tx_tstamp_status: Tx tstamp status tracker + */ +struct idpf_ptp_vport_tx_tstamp_caps { + u32 vport_id; + u16 num_entries; + u16 tstamp_ns_lo_bit; + spinlock_t latches_lock; + spinlock_t status_lock; + bool access:1; + struct list_head latches_free; + struct list_head latches_in_use; + struct idpf_ptp_tx_tstamp_status tx_tstamp_status[]; +}; + +/** + * struct idpf_ptp - PTP parameters + * @info: structure defining PTP hardware capabilities + * @clock: pointer to registered PTP clock device + * @adapter: back pointer to the adapter + * @base_incval: base increment value of the PTP clock + * @max_adj: maximum adjustment of the PTP clock + * @cmd: HW specific command masks + * @cached_phc_time: a cached copy of the PHC time for timestamp extension + * @cached_phc_jiffies: jiffies when cached_phc_time was last updated + * @dev_clk_regs: the set of registers to access the device clock + * @caps: PTP capabilities negotiated with the Control Plane + * @get_dev_clk_time_access: access type for 
getting the device clock time + * @get_cross_tstamp_access: access type for the cross timestamping + * @set_dev_clk_time_access: access type for setting the device clock time + * @adj_dev_clk_time_access: access type for the adjusting the device clock + * @tx_tstamp_access: access type for the Tx timestamp value read + * @rsv: reserved bits + * @secondary_mbx: parameters for using dedicated PTP mailbox + * @read_dev_clk_lock: spinlock protecting access to the device clock read + * operation executed by the HW latch + */ +struct idpf_ptp { + struct ptp_clock_info info; + struct ptp_clock *clock; + struct idpf_adapter *adapter; + u64 base_incval; + u64 max_adj; + struct idpf_ptp_cmd cmd; + u64 cached_phc_time; + unsigned long cached_phc_jiffies; + struct idpf_ptp_dev_clk_regs dev_clk_regs; + u32 caps; + enum idpf_ptp_access get_dev_clk_time_access:2; + enum idpf_ptp_access get_cross_tstamp_access:2; + enum idpf_ptp_access set_dev_clk_time_access:2; + enum idpf_ptp_access adj_dev_clk_time_access:2; + enum idpf_ptp_access tx_tstamp_access:2; + u8 rsv; + struct idpf_ptp_secondary_mbx secondary_mbx; + spinlock_t read_dev_clk_lock; +}; + +/** + * idpf_ptp_info_to_adapter - get driver adapter struct from ptp_clock_info + * @info: pointer to ptp_clock_info struct + * + * Return: pointer to the corresponding adapter struct + */ +static inline struct idpf_adapter * +idpf_ptp_info_to_adapter(const struct ptp_clock_info *info) +{ + const struct idpf_ptp *ptp = container_of_const(info, struct idpf_ptp, + info); + return ptp->adapter; +} + +/** + * struct idpf_ptp_dev_timers - System time and device time values + * @sys_time_ns: system time value expressed in nanoseconds + * @dev_clk_time_ns: device clock time value expressed in nanoseconds + */ +struct idpf_ptp_dev_timers { + u64 sys_time_ns; + u64 dev_clk_time_ns; +}; + +/** + * idpf_ptp_is_vport_tx_tstamp_ena - Verify the Tx timestamping enablement for + * a given vport. + * @vport: Virtual port structure + * + * Tx timestamp capabilities are negotiated with the Control Plane only if the + * device clock value can be read, Tx timestamp access type is different than + * NONE, and the PTP clock for the adapter is created. When all those conditions + * are satisfied, Tx timestamp feature is enabled and tx_tstamp_caps is + * allocated and fulfilled. + * + * Return: true if the Tx timestamping is enabled, false otherwise. + */ +static inline bool idpf_ptp_is_vport_tx_tstamp_ena(struct idpf_vport *vport) +{ + if (!vport->tx_tstamp_caps) + return false; + else + return true; +} + +/** + * idpf_ptp_is_vport_rx_tstamp_ena - Verify the Rx timestamping enablement for + * a given vport. + * @vport: Virtual port structure + * + * Rx timestamp feature is enabled if the PTP clock for the adapter is created + * and it is possible to read the value of the device clock. The second + * assumption comes from the need to extend the Rx timestamp value to 64 bit + * based on the current device clock time. + * + * Return: true if the Rx timestamping is enabled, false otherwise. 
+ */ +static inline bool idpf_ptp_is_vport_rx_tstamp_ena(struct idpf_vport *vport) +{ + if (!vport->adapter->ptp || + vport->adapter->ptp->get_dev_clk_time_access == IDPF_PTP_NONE) + return false; + else + return true; +} + +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +int idpf_ptp_init(struct idpf_adapter *adapter); +void idpf_ptp_release(struct idpf_adapter *adapter); +int idpf_ptp_get_caps(struct idpf_adapter *adapter); +void idpf_ptp_get_features_access(const struct idpf_adapter *adapter); +bool idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq); +int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time); +int idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time); +int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time); +int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval); +int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta); +int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport); +int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport); +int idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config); +u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u64 in_tstamp); +u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, u32 in_timestamp); +int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + u32 *idx); +void idpf_tstamp_task(struct work_struct *work); +#else /* CONFIG_PTP_1588_CLOCK */ +static inline int idpf_ptp_init(struct idpf_adapter *adapter) +{ + return 0; +} + +static inline void idpf_ptp_release(struct idpf_adapter *adapter) { } + +static inline int idpf_ptp_get_caps(struct idpf_adapter *adapter) +{ + return -EOPNOTSUPP; +} + +static inline void +idpf_ptp_get_features_access(const struct idpf_adapter *adapter) { } + +static inline bool +idpf_ptp_get_txq_tstamp_capability(struct idpf_tx_queue *txq) +{ + return false; +} + +static inline int +idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time) +{ + return -EOPNOTSUPP; +} + +static inline int +idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, + u64 time) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, + u64 incval) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, + s64 delta) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport) +{ + return -EOPNOTSUPP; +} + +static inline int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport) +{ + return -EOPNOTSUPP; +} + +static inline int +idpf_ptp_set_timestamp_mode(struct idpf_vport *vport, + struct kernel_hwtstamp_config *config) +{ + return -EOPNOTSUPP; +} + +static inline u64 idpf_ptp_extend_ts(struct idpf_vport *vport, u32 in_tstamp) +{ + return 0; +} + +static inline u64 idpf_ptp_tstamp_extend_32b_to_64b(u64 cached_phc_time, + u32 in_timestamp) +{ + return 0; +} + +static inline int idpf_ptp_request_ts(struct idpf_tx_queue *tx_q, + struct sk_buff *skb, u32 *idx) +{ + return -EOPNOTSUPP; +} + +static inline void idpf_tstamp_task(struct work_struct *work) { } +#endif /* CONFIG_PTP_1588_CLOCK */ +#endif /* _IDPF_PTP_H */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c 
b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index eae1b6f474e6..555879b1248d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -362,17 +362,18 @@ netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; + int csum, tso, needed; unsigned int count; __be16 protocol; - int csum, tso; count = idpf_tx_desc_count_required(tx_q, skb); if (unlikely(!count)) return idpf_tx_drop_skb(tx_q, skb); - if (idpf_tx_maybe_stop_common(tx_q, - count + IDPF_TX_DESCS_PER_CACHE_LINE + - IDPF_TX_DESCS_FOR_CTX)) { + needed = count + IDPF_TX_DESCS_PER_CACHE_LINE + IDPF_TX_DESCS_FOR_CTX; + if (!netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + IDPF_DESC_UNUSED(tx_q), + needed, needed)) { idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); u64_stats_update_begin(&tx_q->stats_sync); @@ -891,7 +892,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. @@ -901,21 +901,20 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, */ static void idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { u64 qword; qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len); fields->len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword); - *ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); + fields->ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword); } /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * * Decode the Rx descriptor and extract relevant information including the * size and Rx packet type. 
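The singleq xmit path above now sizes its stop check as the frame's descriptor count plus IDPF_TX_DESCS_PER_CACHE_LINE and IDPF_TX_DESCS_FOR_CTX, and hands that together with IDPF_DESC_UNUSED(tx_q) to netif_subqueue_maybe_stop(). The macro body itself is not part of this hunk; the following is a hedged, standalone sketch of how such ring free-space macros are commonly written, using hypothetical names:

/*
 * Free entries in a ring where next_to_use (ntu) chases next_to_clean (ntc).
 * One slot is deliberately left unused so that ntu == ntc means "empty".
 */
static unsigned int ring_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

/* stop transmitting when fewer than 'needed' descriptors remain free */
static int must_stop(unsigned int ntc, unsigned int ntu,
		     unsigned int count, unsigned int needed)
{
	return ring_unused(ntc, ntu, count) < needed;
}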
@@ -925,12 +924,12 @@ idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, */ static void idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { fields->len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); - *ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, - le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); + fields->ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M, + le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0)); } /** @@ -938,18 +937,17 @@ idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values - * @ptype: pointer that will store packet type * */ static void idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, const union virtchnl2_rx_desc *rx_desc, - struct libeth_rqe_info *fields, u32 *ptype) + struct libeth_rqe_info *fields) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_desc, fields, ptype); + idpf_rx_singleq_extract_base_fields(rx_desc, fields); else - idpf_rx_singleq_extract_flex_fields(rx_desc, fields, ptype); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields); } /** @@ -972,7 +970,6 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) struct libeth_rqe_info fields = { }; union virtchnl2_rx_desc *rx_desc; struct idpf_rx_buf *rx_buf; - u32 ptype; /* get the Rx desc from Rx queue based on 'next_to_clean' */ rx_desc = &rx_q->rx[ntc]; @@ -993,7 +990,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) */ dma_rmb(); - idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields, &ptype); + idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); rx_buf = &rx_q->rx_buf[ntc]; if (!libeth_rx_sync_for_cpu(rx_buf, fields.len)) @@ -1009,7 +1006,7 @@ static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; @@ -1037,7 +1034,8 @@ skip_data: total_rx_bytes += skb->len; /* protocol */ - idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_process_skb_fields(rx_q, skb, rx_desc, + fields.ptype); /* send completed skb up the stack */ napi_gro_receive(rx_q->pp->p.napi, skb); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index bdf52cef3891..66a1b040639d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -5,6 +5,7 @@ #include <net/libeth/tx.h> #include "idpf.h" +#include "idpf_ptp.h" #include "idpf_virtchnl.h" struct idpf_tx_stash { @@ -382,12 +383,12 @@ err_out: */ static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { - if (unlikely(!rx_buf->page)) + if (unlikely(!rx_buf->netmem)) return; - page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); + libeth_rx_recycle_slow(rx_buf->netmem); - rx_buf->page = NULL; + rx_buf->netmem = 0; rx_buf->offset = 0; } @@ -1107,6 +1108,8 @@ void idpf_vport_queues_rel(struct idpf_vport *vport) */ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { + struct idpf_ptp_vport_tx_tstamp_caps *caps = vport->tx_tstamp_caps; + struct work_struct *tstamp_task = &vport->tstamp_task; int i, j, k = 0; vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), @@ -1121,6 
+1124,12 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) for (j = 0; j < tx_grp->num_txq; j++, k++) { vport->txqs[k] = tx_grp->txqs[j]; vport->txqs[k]->idx = k; + + if (!caps) + continue; + + vport->txqs[k]->cached_tstamp_caps = caps; + vport->txqs[k]->tstamp_task = tstamp_task; } } @@ -1655,6 +1664,40 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) } /** + * idpf_tx_read_tstamp - schedule a work to read Tx timestamp value + * @txq: queue to read the timestamp from + * @skb: socket buffer to provide Tx timestamp value + * + * Schedule a work to read Tx timestamp value generated once the packet is + * transmitted. + */ +static void idpf_tx_read_tstamp(struct idpf_tx_queue *txq, struct sk_buff *skb) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_ptp_tx_tstamp_status *tx_tstamp_status; + + tx_tstamp_caps = txq->cached_tstamp_caps; + spin_lock_bh(&tx_tstamp_caps->status_lock); + + for (u32 i = 0; i < tx_tstamp_caps->num_entries; i++) { + tx_tstamp_status = &tx_tstamp_caps->tx_tstamp_status[i]; + if (tx_tstamp_status->state != IDPF_PTP_FREE) + continue; + + tx_tstamp_status->skb = skb; + tx_tstamp_status->state = IDPF_PTP_REQUEST; + + /* Fetch timestamp from completion descriptor through + * virtchnl msg to report to stack. + */ + queue_work(system_unbound_wq, txq->tstamp_task); + break; + } + + spin_unlock_bh(&tx_tstamp_caps->status_lock); +} + +/** * idpf_tx_clean_stashed_bufs - clean bufs that were stored for * out of order completions * @txq: queue to clean @@ -1682,6 +1725,11 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, continue; hash_del(&stash->hlist); + + if (stash->buf.type == LIBETH_SQE_SKB && + (skb_shinfo(stash->buf.skb)->tx_flags & SKBTX_IN_PROGRESS)) + idpf_tx_read_tstamp(txq, stash->buf.skb); + libeth_tx_complete(&stash->buf, &cp); /* Push shadow buf back onto stack */ @@ -1876,8 +1924,12 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, idpf_tx_buf_compl_tag(tx_buf) != compl_tag)) return false; - if (tx_buf->type == LIBETH_SQE_SKB) + if (tx_buf->type == LIBETH_SQE_SKB) { + if (skb_shinfo(tx_buf->skb)->tx_flags & SKBTX_IN_PROGRESS) + idpf_tx_read_tstamp(txq, tx_buf->skb); + libeth_tx_complete(tx_buf, &cp); + } idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf); @@ -2127,11 +2179,24 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, struct idpf_tx_splitq_params *params, u16 td_cmd, u16 size) { - desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd; + *(u32 *)&desc->flow.qw1.cmd_dtype = (u8)(params->dtype | td_cmd); desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size); desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag); } +/* Global conditions to tell whether the txq (and related resources) + * has room to allow the use of "size" descriptors. 
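idpf_tx_read_tstamp() above walks the negotiated latch entries under status_lock, claims the first IDPF_PTP_FREE slot for the completed skb, and defers the actual value read to a workqueue because fetching it requires a virtchnl round trip. A standalone sketch of that claim step with hypothetical names (locking omitted for brevity):

enum latch_state { LATCH_FREE, LATCH_REQUEST };	/* mirrors IDPF_PTP_FREE/REQUEST */

struct latch_entry {
	enum latch_state state;
	void *skb;			/* packet waiting for its timestamp */
};

/* return the claimed index, or -1 if every latch is already in use */
static int claim_latch(struct latch_entry *entries, int num, void *skb)
{
	for (int i = 0; i < num; i++) {
		if (entries[i].state != LATCH_FREE)
			continue;

		entries[i].skb = skb;
		entries[i].state = LATCH_REQUEST;
		return i;
	}

	return -1;			/* caller accounts this as a skipped timestamp */
}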
+ */ +static int idpf_txq_has_room(struct idpf_tx_queue *tx_q, u32 size) +{ + if (IDPF_DESC_UNUSED(tx_q) < size || + IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > + IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq) || + IDPF_TX_BUF_RSV_LOW(tx_q)) + return 0; + return 1; +} + /** * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions * @tx_q: the queue to be checked @@ -2142,29 +2207,11 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { - if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) - goto out; - - /* If there are too many outstanding completions expected on the - * completion queue, stop the TX queue to give the device some time to - * catch up - */ - if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) > - IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq))) - goto splitq_stop; - - /* Also check for available book keeping buffers; if we are low, stop - * the queue to wait for more completions - */ - if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q))) - goto splitq_stop; - - return 0; - -splitq_stop: - netif_stop_subqueue(tx_q->netdev, tx_q->idx); + if (netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, + idpf_txq_has_room(tx_q, descs_needed), + 1, 1)) + return 0; -out: u64_stats_update_begin(&tx_q->stats_sync); u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); @@ -2190,12 +2237,6 @@ void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; - if (idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED)) { - u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.q_busy); - u64_stats_update_end(&tx_q->stats_sync); - } - /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -2296,7 +2337,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, * descriptor. Reset that here. */ tx_desc = &txq->flex_tx[idx]; - memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); + memset(tx_desc, 0, sizeof(*tx_desc)); if (idx == 0) idx = txq->desc_count; idx--; @@ -2699,10 +2740,10 @@ static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * Since the TX buffer rings mimics the descriptor ring, update the tx buffer * ring entry to reflect that this index is a context descriptor */ -static struct idpf_flex_tx_ctx_desc * +static union idpf_flex_tx_ctx_desc * idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { - struct idpf_flex_tx_ctx_desc *desc; + union idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; txq->tx_buf[i].type = LIBETH_SQE_CTX; @@ -2732,6 +2773,73 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) return NETDEV_TX_OK; } +#if (IS_ENABLED(CONFIG_PTP_1588_CLOCK)) +/** + * idpf_tx_tstamp - set up context descriptor for hardware timestamp + * @tx_q: queue to send buffer on + * @skb: pointer to the SKB we're sending + * @off: pointer to the offload struct + * + * Return: Positive index number on success, negative otherwise. 
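idpf_tx_set_tstamp_desc() just below packs the TSYN request into the context descriptor with le64_encode_bits(), which places a value at the bit positions selected by a mask. A hedged sketch of that field-packing step in plain C (hypothetical name; the kernel helper additionally converts the result to little-endian, which is skipped here):

static unsigned long long encode_bits(unsigned long long val,
				      unsigned long long mask)
{
	/* mask & -mask isolates the lowest set bit, i.e. 1 << field_shift */
	return (val * (mask & -mask)) & mask;
}

/* e.g. encode_bits(idx, 0xf0000ULL) places idx into bits 19:16 (mask is made up) */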
+ */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + int err, idx; + + /* only timestamp the outbound packet if the user has requested it */ + if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) + return -1; + + if (!idpf_ptp_get_txq_tstamp_capability(tx_q)) + return -1; + + /* Tx timestamps cannot be sampled when doing TSO */ + if (off->tx_flags & IDPF_TX_FLAGS_TSO) + return -1; + + /* Grab an open timestamp slot */ + err = idpf_ptp_request_ts(tx_q, skb, &idx); + if (err) { + u64_stats_update_begin(&tx_q->stats_sync); + u64_stats_inc(&tx_q->q_stats.tstamp_skipped); + u64_stats_update_end(&tx_q->stats_sync); + + return -1; + } + + off->tx_flags |= IDPF_TX_FLAGS_TSYN; + + return idx; +} + +/** + * idpf_tx_set_tstamp_desc - Set the Tx descriptor fields needed to generate + * PHY Tx timestamp + * @ctx_desc: Context descriptor + * @idx: Index of the Tx timestamp latch + */ +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ + ctx_desc->tsyn.qw1 = le64_encode_bits(IDPF_TX_DESC_DTYPE_CTX, + IDPF_TX_CTX_DTYPE_M) | + le64_encode_bits(IDPF_TX_CTX_DESC_TSYN, + IDPF_TX_CTX_CMD_M) | + le64_encode_bits(idx, IDPF_TX_CTX_TSYN_REG_M); +} +#else /* CONFIG_PTP_1588_CLOCK */ +static int idpf_tx_tstamp(struct idpf_tx_queue *tx_q, struct sk_buff *skb, + struct idpf_tx_offload_params *off) +{ + return -1; +} + +static void idpf_tx_set_tstamp_desc(union idpf_flex_tx_ctx_desc *ctx_desc, + u32 idx) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors * @skb: send buffer @@ -2743,9 +2851,10 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; + union idpf_flex_tx_ctx_desc *ctx_desc; struct idpf_tx_buf *first; unsigned int count; - int tso; + int tso, idx; count = idpf_tx_desc_count_required(tx_q, skb); if (unlikely(!count)) @@ -2765,8 +2874,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, if (tso) { /* If tso is needed, set up context desc */ - struct idpf_flex_tx_ctx_desc *ctx_desc = - idpf_tx_splitq_get_ctx_desc(tx_q); + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); ctx_desc->tso.qw1.cmd_dtype = cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX | @@ -2784,6 +2892,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, u64_stats_update_end(&tx_q->stats_sync); } + idx = idpf_tx_tstamp(tx_q, skb, &tx_params.offload); + if (idx != -1) { + ctx_desc = idpf_tx_splitq_get_ctx_desc(tx_q); + idpf_tx_set_tstamp_desc(ctx_desc, idx); + } + /* record the location of the first descriptor for this packet */ first = &tx_q->tx_buf[tx_q->next_to_use]; first->skb = skb; @@ -3046,6 +3160,33 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, } /** + * idpf_rx_hwtstamp - check for an RX timestamp and pass up the stack + * @rxq: pointer to the rx queue that receives the timestamp + * @rx_desc: pointer to rx descriptor containing timestamp + * @skb: skb to put timestamp in + */ +static void +idpf_rx_hwtstamp(const struct idpf_rx_queue *rxq, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct sk_buff *skb) +{ + u64 cached_time, ts_ns; + u32 ts_high; + + if (!(rx_desc->ts_low & VIRTCHNL2_RX_FLEX_TSTAMP_VALID)) + return; + + cached_time = READ_ONCE(rxq->cached_phc_time); + + ts_high = le32_to_cpu(rx_desc->ts_high); + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high); + + *skb_hwtstamps(skb) = (struct 
skb_shared_hwtstamps) { + .hwtstamp = ns_to_ktime(ts_ns), + }; +} + +/** * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor * @rxq: Rx descriptor ring packet is being transacted on * @skb: pointer to current skb being populated @@ -3070,6 +3211,9 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, /* process RSS/hash */ idpf_rx_hash(rxq, skb, rx_desc, decoded); + if (idpf_queue_has(PTP, rxq)) + idpf_rx_hwtstamp(rxq, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rxq->netdev); skb_record_rx_queue(skb, rxq->idx); @@ -3096,10 +3240,10 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { - u32 hr = rx_buf->page->pp->p.offset; + u32 hr = netmem_get_pp(rx_buf->netmem)->p.offset; - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->offset + hr, size, rx_buf->truesize); + skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags, rx_buf->netmem, + rx_buf->offset + hr, size, rx_buf->truesize); } /** @@ -3122,16 +3266,22 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, struct libeth_fqe *buf, u32 data_len) { u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN; + struct page *hdr_page, *buf_page; const void *src; void *dst; - if (!libeth_rx_sync_for_cpu(buf, copy)) + if (unlikely(netmem_is_net_iov(buf->netmem)) || + !libeth_rx_sync_for_cpu(buf, copy)) return 0; - dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; - src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; - memcpy(dst, src, LARGEST_ALIGN(copy)); + hdr_page = __netmem_to_page(hdr->netmem); + buf_page = __netmem_to_page(buf->netmem); + dst = page_address(hdr_page) + hdr->offset + + pp_page_to_nmdesc(hdr_page)->pp->p.offset; + src = page_address(buf_page) + buf->offset + + pp_page_to_nmdesc(buf_page)->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); buf->offset += copy; return copy; @@ -3147,11 +3297,12 @@ static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, */ struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { - u32 hr = buf->page->pp->p.offset; + struct page *buf_page = __netmem_to_page(buf->netmem); + u32 hr = pp_page_to_nmdesc(buf_page)->pp->p.offset; struct sk_buff *skb; void *va; - va = page_address(buf->page) + buf->offset; + va = page_address(buf_page) + buf->offset; prefetch(va + hr); skb = napi_build_skb(va, buf->truesize); @@ -3285,7 +3436,8 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) if (unlikely(!hdr_len && !skb)) { hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); - pkt_len -= hdr_len; + /* If failed, drop both buffers by setting len to 0 */ + pkt_len -= hdr_len ? : pkt_len; u64_stats_update_begin(&rxq->stats_sync); u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); @@ -3302,7 +3454,7 @@ static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) u64_stats_update_end(&rxq->stats_sync); } - hdr->page = NULL; + hdr->netmem = 0; payload: if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) @@ -3318,7 +3470,7 @@ payload: break; skip_data: - rx_buf->page = NULL; + rx_buf->netmem = 0; idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); @@ -4025,6 +4177,14 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) return budget; } + /* Switch to poll mode in the tear-down path after sending disable + * queues virtchnl message, as the interrupts will be disabled after + * that. 
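idpf_rx_hwtstamp() above receives only 32 bits of timestamp from the descriptor and rebuilds a full 64-bit nanosecond value via idpf_ptp_tstamp_extend_32b_to_64b() and the PHC time cached on the queue. That helper's body is not in this hunk; a hedged sketch of the usual extension arithmetic, valid as long as the cached PHC time is within roughly half of the 32-bit wrap (about 2.1 s) of the moment the packet was stamped:

#include <stdint.h>

static uint64_t extend_32b_to_64b(uint64_t cached_phc_ns, uint32_t hw_ts)
{
	uint32_t phc_lo = (uint32_t)cached_phc_ns;
	uint32_t delta = hw_ts - phc_lo;

	if (delta > UINT32_MAX / 2)
		/* the hardware value is actually older than the cached time */
		return cached_phc_ns - (uint32_t)(phc_lo - hw_ts);

	return cached_phc_ns + delta;
}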
+ */ + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) + return budget; + work_done = min_t(int, work_done, budget - 1); /* Exit the polling mode, but don't re-enable interrupts if stack might @@ -4035,15 +4195,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) else idpf_vport_intr_set_wb_on_itr(q_vector); - /* Switch to poll mode in the tear-down path after sending disable - * queues virtchnl message, as the interrupts will be disabled after - * that - */ - if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, - q_vector->tx[0]))) - return budget; - else - return work_done; + return work_done; } /** @@ -4205,9 +4357,13 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) int idpf_vport_intr_alloc(struct idpf_vport *vport) { u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; + struct idpf_vport_user_config_data *user_config; struct idpf_q_vector *q_vector; + struct idpf_q_coalesce *q_coal; u32 complqs_per_vector, v_idx; + u16 idx = vport->idx; + user_config = &vport->adapter->vport_config[idx]->user_config; vport->q_vectors = kcalloc(vport->num_q_vectors, sizeof(struct idpf_q_vector), GFP_KERNEL); if (!vport->q_vectors) @@ -4225,14 +4381,15 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; + q_coal = &user_config->q_coalesce[v_idx]; q_vector->vport = vport; - q_vector->tx_itr_value = IDPF_ITR_TX_DEF; - q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->tx_itr_value = q_coal->tx_coalesce_usecs; + q_vector->tx_intr_mode = q_coal->tx_intr_mode; q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1; - q_vector->rx_itr_value = IDPF_ITR_RX_DEF; - q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; + q_vector->rx_itr_value = q_coal->rx_coalesce_usecs; + q_vector->rx_intr_mode = q_coal->rx_intr_mode; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index b029f566e57c..281de655a813 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -57,6 +57,7 @@ /* Default vector sharing */ #define IDPF_MBX_Q_VEC 1 #define IDPF_MIN_Q_VEC 1 +#define IDPF_MIN_RDMA_VEC 2 #define IDPF_DFLT_TX_Q_DESC_COUNT 512 #define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512 @@ -142,6 +143,7 @@ do { \ #define IDPF_TX_FLAGS_IPV4 BIT(1) #define IDPF_TX_FLAGS_IPV6 BIT(2) #define IDPF_TX_FLAGS_TUNNEL BIT(3) +#define IDPF_TX_FLAGS_TSYN BIT(4) union idpf_tx_flex_desc { struct idpf_flex_tx_desc q; /* queue based scheduling */ @@ -289,6 +291,8 @@ struct idpf_ptype_state { * @__IDPF_Q_POLL_MODE: Enable poll mode * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) + * @__IDPF_Q_PTP: indicates whether the Rx timestamping is enabled for the + * queue * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { @@ -299,6 +303,7 @@ enum idpf_queue_flags_t { __IDPF_Q_POLL_MODE, __IDPF_Q_CRC_EN, __IDPF_Q_HSPLIT_EN, + __IDPF_Q_PTP, __IDPF_Q_FLAGS_NBITS, }; @@ -443,6 +448,7 @@ struct idpf_tx_queue_stats { u64_stats_t q_busy; u64_stats_t skb_drops; u64_stats_t dma_map_errs; + u64_stats_t tstamp_skipped; }; #define IDPF_ITR_DYNAMIC 1 @@ -494,6 +500,7 @@ struct idpf_txq_stash { * @next_to_alloc: RX buffer to allocate at * @skb: Pointer to the skb * @truesize: data buffer truesize in singleq + * 
@cached_phc_time: Cached PHC time for the Rx queue * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_rx_queue_stats * @q_id: Queue id @@ -541,6 +548,7 @@ struct idpf_rx_queue { struct sk_buff *skb; u32 truesize; + u64 cached_phc_time; struct u64_stats_sync stats_sync; struct idpf_rx_queue_stats q_stats; @@ -560,7 +568,7 @@ struct idpf_rx_queue { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_rx_queue, 64, - 80 + sizeof(struct u64_stats_sync), + 88 + sizeof(struct u64_stats_sync), 32); /** @@ -617,6 +625,8 @@ libeth_cacheline_set_assert(struct idpf_rx_queue, 64, * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset + * @cached_tstamp_caps: Tx timestamp capabilities negotiated with the CP + * @tstamp_task: Work that handles Tx timestamp read * @stats_sync: See struct u64_stats_sync * @q_stats: See union idpf_tx_queue_stats * @q_id: Queue id @@ -630,7 +640,7 @@ struct idpf_tx_queue { struct idpf_base_tx_desc *base_tx; struct idpf_base_tx_ctx_desc *base_ctx; union idpf_tx_flex_desc *flex_tx; - struct idpf_flex_tx_ctx_desc *flex_ctx; + union idpf_flex_tx_ctx_desc *flex_ctx; void *desc_ring; }; @@ -666,6 +676,9 @@ struct idpf_tx_queue { u16 compl_tag_cur_gen; u16 compl_tag_gen_max; + struct idpf_ptp_vport_tx_tstamp_caps *cached_tstamp_caps; + struct work_struct *tstamp_task; + struct u64_stats_sync stats_sync; struct idpf_tx_queue_stats q_stats; __cacheline_group_end_aligned(read_write); @@ -679,7 +692,7 @@ struct idpf_tx_queue { __cacheline_group_end_aligned(cold); }; libeth_cacheline_set_assert(struct idpf_tx_queue, 64, - 88 + sizeof(struct u64_stats_sync), + 112 + sizeof(struct u64_stats_sync), 24); /** @@ -1037,12 +1050,4 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); -static inline bool idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, - u32 needed) -{ - return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, - IDPF_DESC_UNUSED(tx_q), - needed, needed); -} - #endif /* !_IDPF_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index aba828abcb17..259d50fded67 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -9,10 +9,13 @@ /** * idpf_vf_ctlq_reg_init - initialize default mailbox registers + * @adapter: adapter structure * @cq: pointer to the array of create control queues */ -static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) +static void idpf_vf_ctlq_reg_init(struct idpf_adapter *adapter, + struct idpf_ctlq_create_info *cq) { + resource_size_t mbx_start = adapter->dev_ops.static_reg_info[0].start; int i; for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) { @@ -21,22 +24,22 @@ static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq) switch (ccq->type) { case IDPF_CTLQ_TYPE_MAILBOX_TX: /* set head and tail registers in our local struct */ - ccq->reg.head = VF_ATQH; - ccq->reg.tail = VF_ATQT; - ccq->reg.len = VF_ATQLEN; - ccq->reg.bah = VF_ATQBAH; - ccq->reg.bal = VF_ATQBAL; + ccq->reg.head = VF_ATQH - mbx_start; + ccq->reg.tail = VF_ATQT - mbx_start; + ccq->reg.len = VF_ATQLEN - mbx_start; + ccq->reg.bah = VF_ATQBAH - mbx_start; + ccq->reg.bal = VF_ATQBAL - mbx_start; ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M; ccq->reg.len_ena_mask = 
VF_ATQLEN_ATQENABLE_M; ccq->reg.head_mask = VF_ATQH_ATQH_M; break; case IDPF_CTLQ_TYPE_MAILBOX_RX: /* set head and tail registers in our local struct */ - ccq->reg.head = VF_ARQH; - ccq->reg.tail = VF_ARQT; - ccq->reg.len = VF_ARQLEN; - ccq->reg.bah = VF_ARQBAH; - ccq->reg.bal = VF_ARQBAL; + ccq->reg.head = VF_ARQH - mbx_start; + ccq->reg.tail = VF_ARQT - mbx_start; + ccq->reg.len = VF_ARQLEN - mbx_start; + ccq->reg.bah = VF_ARQBAH - mbx_start; + ccq->reg.bal = VF_ARQBAL - mbx_start; ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M; ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M; ccq->reg.head_mask = VF_ARQH_ARQH_M; @@ -129,7 +132,7 @@ free_reg_vals: */ static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter) { - adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT); + adapter->reset_reg.rstat = idpf_get_rstat_reg_addr(adapter, VFGEN_RSTAT); adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M; } @@ -148,6 +151,17 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter, } /** + * idpf_idc_vf_register - register for IDC callbacks + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. + */ +static int idpf_idc_vf_register(struct idpf_adapter *adapter) +{ + return idpf_idc_init_aux_core_dev(adapter, IIDC_FUNCTION_TYPE_VF); +} + +/** * idpf_vf_reg_ops_init - Initialize register API function pointers * @adapter: Driver specific private structure */ @@ -167,4 +181,11 @@ static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter) void idpf_vf_dev_ops_init(struct idpf_adapter *adapter) { idpf_vf_reg_ops_init(adapter); + + adapter->dev_ops.idc_init = idpf_idc_vf_register; + + resource_set_range(&adapter->dev_ops.static_reg_info[0], + VF_BASE, IDPF_VF_MBX_REGION_SZ); + resource_set_range(&adapter->dev_ops.static_reg_info[1], + VFGEN_RSTAT, IDPF_VF_RSTAT_REGION_SZ); } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index 3d2413b8684f..a028c69f7fdc 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1,92 +1,12 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include <linux/export.h> #include <net/libeth/rx.h> #include "idpf.h" #include "idpf_virtchnl.h" - -#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 -#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) -#define IDPF_VC_XN_IDX_M GENMASK(7, 0) -#define IDPF_VC_XN_SALT_M GENMASK(15, 8) -#define IDPF_VC_XN_RING_LEN U8_MAX - -/** - * enum idpf_vc_xn_state - Virtchnl transaction status - * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used - * @IDPF_VC_XN_WAITING: expecting a reply, not yet received - * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, - * buffer updated - * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there - * was an error, buffer not updated - * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down - * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the - * return context; a callback may be provided to handle - * return - */ -enum idpf_vc_xn_state { - IDPF_VC_XN_IDLE = 1, - IDPF_VC_XN_WAITING, - IDPF_VC_XN_COMPLETED_SUCCESS, - IDPF_VC_XN_COMPLETED_FAILED, - IDPF_VC_XN_SHUTDOWN, - IDPF_VC_XN_ASYNC, -}; - -struct idpf_vc_xn; -/* Callback for asynchronous messages */ -typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, - const struct idpf_ctlq_msg *); - -/** - * struct idpf_vc_xn - Data structure 
representing virtchnl transactions - * @completed: virtchnl event loop uses that to signal when a reply is - * available, uses kernel completion API - * @state: virtchnl event loop stores the data below, protected by the - * completion's lock. - * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be - * truncated on its way to the receiver thread according to - * reply_buf.iov_len. - * @reply: Reference to the buffer(s) where the reply data should be written - * to. May be 0-length (then NULL address permitted) if the reply data - * should be ignored. - * @async_handler: if sent asynchronously, a callback can be provided to handle - * the reply when it's received - * @vc_op: corresponding opcode sent with this transaction - * @idx: index used as retrieval on reply receive, used for cookie - * @salt: changed every message to make unique, used for cookie - */ -struct idpf_vc_xn { - struct completion completed; - enum idpf_vc_xn_state state; - size_t reply_sz; - struct kvec reply; - async_vc_cb async_handler; - u32 vc_op; - u8 idx; - u8 salt; -}; - -/** - * struct idpf_vc_xn_params - Parameters for executing transaction - * @send_buf: kvec for send buffer - * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length - * @timeout_ms: timeout to wait for reply - * @async: send message asynchronously, will not wait on completion - * @async_handler: If sent asynchronously, optional callback handler. The user - * must be careful when using async handlers as the memory for - * the recv_buf _cannot_ be on stack if this is async. - * @vc_op: virtchnl op to send - */ -struct idpf_vc_xn_params { - struct kvec send_buf; - struct kvec recv_buf; - int timeout_ms; - bool async; - async_vc_cb async_handler; - u32 vc_op; -}; +#include "idpf_ptp.h" /** * struct idpf_vc_xn_manager - Manager for tracking transactions @@ -235,6 +155,55 @@ err_kfree: return err; } +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) +/** + * idpf_ptp_is_mb_msg - Check if the message is PTP-related + * @op: virtchnl opcode + * + * Return: true if msg is PTP-related, false otherwise. + */ +static bool idpf_ptp_is_mb_msg(u32 op) +{ + switch (op) { + case VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_CROSS_TIME: + case VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE: + case VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS: + case VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP: + return true; + default: + return false; + } +} + +/** + * idpf_prepare_ptp_mb_msg - Prepare PTP related message + * + * @adapter: Driver specific private structure + * @op: virtchnl opcode + * @ctlq_msg: Corresponding control queue message + */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ + /* If the message is PTP-related and the secondary mailbox is available, + * send the message through the secondary mailbox. 
+ */ + if (!idpf_ptp_is_mb_msg(op) || !adapter->ptp->secondary_mbx.valid) + return; + + ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_peer_drv; + ctlq_msg->func_id = adapter->ptp->secondary_mbx.peer_mbx_q_id; + ctlq_msg->host_id = adapter->ptp->secondary_mbx.peer_id; +} +#else /* !CONFIG_PTP_1588_CLOCK */ +static void idpf_prepare_ptp_mb_msg(struct idpf_adapter *adapter, u32 op, + struct idpf_ctlq_msg *ctlq_msg) +{ } +#endif /* CONFIG_PTP_1588_CLOCK */ + /** * idpf_send_mb_msg - Send message over mailbox * @adapter: Driver specific private structure @@ -278,6 +247,9 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp; ctlq_msg->func_id = 0; + + idpf_prepare_ptp_mb_msg(adapter, op, ctlq_msg); + ctlq_msg->data_len = msg_size; ctlq_msg->cookie.mbx.chnl_opcode = op; ctlq_msg->cookie.mbx.chnl_retval = 0; @@ -376,7 +348,7 @@ static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr) * All waiting threads will be woken-up and their transaction aborted. Further * operations on that object will fail. */ -static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) { int i; @@ -449,8 +421,8 @@ static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr, * >= @recv_buf.iov_len, but we never overflow @@recv_buf_iov_base). < 0 for * error. */ -static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, - const struct idpf_vc_xn_params *params) +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params) { const struct kvec *send_buf = ¶ms->send_buf; struct idpf_vc_xn *xn; @@ -878,14 +850,14 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL); caps.rss_caps = - cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP | - VIRTCHNL2_CAP_RSS_IPV4_UDP | - VIRTCHNL2_CAP_RSS_IPV4_SCTP | - VIRTCHNL2_CAP_RSS_IPV4_OTHER | - VIRTCHNL2_CAP_RSS_IPV6_TCP | - VIRTCHNL2_CAP_RSS_IPV6_UDP | - VIRTCHNL2_CAP_RSS_IPV6_SCTP | - VIRTCHNL2_CAP_RSS_IPV6_OTHER); + cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP | + VIRTCHNL2_FLOW_IPV4_UDP | + VIRTCHNL2_FLOW_IPV4_SCTP | + VIRTCHNL2_FLOW_IPV4_OTHER | + VIRTCHNL2_FLOW_IPV6_TCP | + VIRTCHNL2_FLOW_IPV6_UDP | + VIRTCHNL2_FLOW_IPV6_SCTP | + VIRTCHNL2_FLOW_IPV6_OTHER); caps.hsplit_caps = cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 | @@ -897,10 +869,13 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) caps.other_caps = cpu_to_le64(VIRTCHNL2_CAP_SRIOV | + VIRTCHNL2_CAP_RDMA | + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS | VIRTCHNL2_CAP_MACFILTER | VIRTCHNL2_CAP_SPLITQ_QSCHED | VIRTCHNL2_CAP_PROMISC | - VIRTCHNL2_CAP_LOOPBACK); + VIRTCHNL2_CAP_LOOPBACK | + VIRTCHNL2_CAP_PTP); xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS; xn_params.send_buf.iov_base = ∩︀ @@ -919,6 +894,163 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) } /** + * idpf_send_get_lan_memory_regions - Send virtchnl get LAN memory regions msg + * @adapter: Driver specific private struct + * + * Return: 0 on success or error code on failure. 
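The reply handling in idpf_send_get_lan_memory_regions() below cannot trust the length implied by the message: the region count claimed inside the reply is checked against the bytes actually received and against the control-queue buffer cap before anything is copied out. A standalone sketch of that flexible-array validation, with a hypothetical struct layout:

#include <stddef.h>
#include <stdint.h>

struct mem_region {
	uint64_t start_offset;
	uint64_t size;
};

struct regions_reply {
	uint16_t num_memory_regions;
	uint16_t pad[3];
	struct mem_region mem_reg[];	/* flexible array member */
};

/* 0 = reply usable, -1 = reject */
static int check_regions_reply(const struct regions_reply *r,
			       size_t reply_sz, size_t buf_cap)
{
	size_t need = sizeof(*r) +
		      (size_t)r->num_memory_regions * sizeof(r->mem_reg[0]);

	if (reply_sz < need)	/* fewer bytes arrived than the count implies */
		return -1;
	if (need > buf_cap)	/* claimed count would overrun the receive buffer */
		return -1;

	return 0;
}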
+ */ +static int idpf_send_get_lan_memory_regions(struct idpf_adapter *adapter) +{ + struct virtchnl2_get_lan_memory_regions *rcvd_regions __free(kfree); + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS, + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int num_regions, size; + struct idpf_hw *hw; + ssize_t reply_sz; + int err = 0; + + rcvd_regions = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcvd_regions) + return -ENOMEM; + + xn_params.recv_buf.iov_base = rcvd_regions; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + + num_regions = le16_to_cpu(rcvd_regions->num_memory_regions); + size = struct_size(rcvd_regions, mem_reg, num_regions); + if (reply_sz < size) + return -EIO; + + if (size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; + + hw = &adapter->hw; + hw->lan_regs = kcalloc(num_regions, sizeof(*hw->lan_regs), GFP_KERNEL); + if (!hw->lan_regs) + return -ENOMEM; + + for (int i = 0; i < num_regions; i++) { + hw->lan_regs[i].addr_len = + le64_to_cpu(rcvd_regions->mem_reg[i].size); + hw->lan_regs[i].addr_start = + le64_to_cpu(rcvd_regions->mem_reg[i].start_offset); + } + hw->num_lan_regs = num_regions; + + return err; +} + +/** + * idpf_calc_remaining_mmio_regs - calculate MMIO regions outside mbx and rstat + * @adapter: Driver specific private structure + * + * Called when idpf_send_get_lan_memory_regions is not supported. This will + * calculate the offsets and sizes for the regions before, in between, and + * after the mailbox and rstat MMIO mappings. + * + * Return: 0 on success or error code on failure. + */ +static int idpf_calc_remaining_mmio_regs(struct idpf_adapter *adapter) +{ + struct resource *rstat_reg = &adapter->dev_ops.static_reg_info[1]; + struct resource *mbx_reg = &adapter->dev_ops.static_reg_info[0]; + struct idpf_hw *hw = &adapter->hw; + + hw->num_lan_regs = IDPF_MMIO_MAP_FALLBACK_MAX_REMAINING; + hw->lan_regs = kcalloc(hw->num_lan_regs, sizeof(*hw->lan_regs), + GFP_KERNEL); + if (!hw->lan_regs) + return -ENOMEM; + + /* Region preceding mailbox */ + hw->lan_regs[0].addr_start = 0; + hw->lan_regs[0].addr_len = mbx_reg->start; + /* Region between mailbox and rstat */ + hw->lan_regs[1].addr_start = mbx_reg->end + 1; + hw->lan_regs[1].addr_len = rstat_reg->start - + hw->lan_regs[1].addr_start; + /* Region after rstat */ + hw->lan_regs[2].addr_start = rstat_reg->end + 1; + hw->lan_regs[2].addr_len = pci_resource_len(adapter->pdev, 0) - + hw->lan_regs[2].addr_start; + + return 0; +} + +/** + * idpf_map_lan_mmio_regs - map remaining LAN BAR regions + * @adapter: Driver specific private structure + * + * Return: 0 on success or error code on failure. 
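A worked example of the fallback carve-up performed by idpf_calc_remaining_mmio_regs() above, using hypothetical offsets (a 64 MiB BAR0, a 10240-byte mailbox window at 0x5000, a 2048-byte rstat window at 0x8000); three regions remain to be mapped around the two static windows:

#include <stdio.h>

int main(void)
{
	unsigned long bar_len = 64UL << 20;		/* hypothetical BAR0 size */
	unsigned long mbx_start = 0x5000, mbx_len = 10240;
	unsigned long rst_start = 0x8000, rst_len = 2048;

	unsigned long r0_start = 0;
	unsigned long r0_len   = mbx_start;		/* region before the mailbox */
	unsigned long r1_start = mbx_start + mbx_len;	/* gap between mailbox and rstat */
	unsigned long r1_len   = rst_start - r1_start;
	unsigned long r2_start = rst_start + rst_len;	/* everything after rstat */
	unsigned long r2_len   = bar_len - r2_start;

	printf("[0x%lx +0x%lx] [0x%lx +0x%lx] [0x%lx +0x%lx]\n",
	       r0_start, r0_len, r1_start, r1_len, r2_start, r2_len);
	return 0;
}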
+ */ +static int idpf_map_lan_mmio_regs(struct idpf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct idpf_hw *hw = &adapter->hw; + resource_size_t res_start; + + res_start = pci_resource_start(pdev, 0); + + for (int i = 0; i < hw->num_lan_regs; i++) { + resource_size_t start; + long len; + + len = hw->lan_regs[i].addr_len; + if (!len) + continue; + start = hw->lan_regs[i].addr_start + res_start; + + hw->lan_regs[i].vaddr = devm_ioremap(&pdev->dev, start, len); + if (!hw->lan_regs[i].vaddr) { + pci_err(pdev, "failed to allocate BAR0 region\n"); + return -ENOMEM; + } + } + + return 0; +} + +/** + * idpf_add_del_fsteer_filters - Send virtchnl add/del Flow Steering message + * @adapter: adapter info struct + * @rule: Flow steering rule to add/delete + * @opcode: VIRTCHNL2_OP_ADD_FLOW_RULE to add filter, or + * VIRTCHNL2_OP_DEL_FLOW_RULE to delete. All other values are invalid. + * + * Send ADD/DELETE flow steering virtchnl message and receive the result. + * + * Return: 0 on success, negative on failure. + */ +int idpf_add_del_fsteer_filters(struct idpf_adapter *adapter, + struct virtchnl2_flow_rule_add_del *rule, + enum virtchnl2_op opcode) +{ + int rule_count = le32_to_cpu(rule->count); + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; + + if (opcode != VIRTCHNL2_OP_ADD_FLOW_RULE && + opcode != VIRTCHNL2_OP_DEL_FLOW_RULE) + return -EINVAL; + + xn_params.vc_op = opcode; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.async = false; + xn_params.send_buf.iov_base = rule; + xn_params.send_buf.iov_len = struct_size(rule, rule_info, rule_count); + xn_params.recv_buf.iov_base = rule; + xn_params.recv_buf.iov_len = struct_size(rule, rule_info, rule_count); + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + return reply_sz < 0 ? 
reply_sz : 0; +} + +/** * idpf_vport_alloc_max_qs - Allocate max queues for a vport * @adapter: Driver specific private structure * @max_q: vport max queue structure @@ -2829,7 +2961,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter) struct idpf_hw *hw = &adapter->hw; int err; - adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info); + adapter->dev_ops.reg_ops.ctlq_reg_init(adapter, ctlq_info); err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info); if (err) @@ -2989,6 +3121,30 @@ restart: msleep(task_delay); } + if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LAN_MEMORY_REGIONS)) { + err = idpf_send_get_lan_memory_regions(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to get LAN memory regions: %d\n", + err); + return -EINVAL; + } + } else { + /* Fallback to mapping the remaining regions of the entire BAR */ + err = idpf_calc_remaining_mmio_regs(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to allocate BAR0 region(s): %d\n", + err); + return -ENOMEM; + } + } + + err = idpf_map_lan_mmio_regs(adapter); + if (err) { + dev_err(&adapter->pdev->dev, "Failed to map BAR0 region(s): %d\n", + err); + return -ENOMEM; + } + pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter)); num_max_vports = idpf_get_max_vports(adapter); adapter->max_vports = num_max_vports; @@ -3029,6 +3185,11 @@ restart: goto err_intr_req; } + err = idpf_ptp_init(adapter); + if (err) + pci_err(adapter->pdev, "PTP init failed, err=%pe\n", + ERR_PTR(err)); + idpf_init_avail_queues(adapter); /* Skew the delay for init tasks for each function based on fn number @@ -3091,7 +3252,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter) if (!remove_in_prog) idpf_vc_xn_shutdown(adapter->vcxn_mngr); + idpf_ptp_release(adapter); idpf_deinit_task(adapter); + idpf_idc_deinit_core_aux_device(adapter->cdev_info); idpf_intr_rel(adapter); if (remove_in_prog) @@ -3158,6 +3321,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) u16 rx_itr[] = {2, 8, 32, 96, 128}; struct idpf_rss_data *rss_data; u16 idx = vport->idx; + int err; vport_config = adapter->vport_config[idx]; rss_data = &vport_config->user_config.rss_data; @@ -3192,6 +3356,18 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) idpf_vport_alloc_vec_indexes(vport); vport->crc_enable = adapter->crc_enable; + + if (!(vport_msg->vport_flags & + cpu_to_le16(VIRTCHNL2_VPORT_UPLINK_PORT))) + return; + + err = idpf_ptp_get_vport_tstamps_caps(vport); + if (err) { + pci_dbg(vport->adapter->pdev, "Tx timestamping not supported\n"); + return; + } + + INIT_WORK(&vport->tstamp_task, idpf_tstamp_task); } /** @@ -3502,6 +3678,79 @@ bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all, } /** + * idpf_vport_is_cap_ena - Check if vport capability is enabled + * @vport: Private data struct + * @flag: flag(s) to check + * + * Return: true if the capability is supported, false otherwise + */ +bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + + return !!(le16_to_cpu(vport_msg->vport_flags) & flag); +} + +/** + * idpf_sideband_flow_type_ena - Check if steering is enabled for flow type + * @vport: Private data struct + * @flow_type: flow type to check (from ethtool.h) + * + * Return: true if sideband filters are allowed for @flow_type, false otherwise + */ +bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type) +{ + struct 
virtchnl2_create_vport *vport_msg; + __le64 caps; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + caps = vport_msg->sideband_flow_caps; + + switch (flow_type) { + case TCP_V4_FLOW: + return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_TCP)); + case UDP_V4_FLOW: + return !!(caps & cpu_to_le64(VIRTCHNL2_FLOW_IPV4_UDP)); + default: + return false; + } +} + +/** + * idpf_sideband_action_ena - Check if steering is enabled for action + * @vport: Private data struct + * @fsp: flow spec + * + * Return: true if sideband filters are allowed for @fsp, false otherwise + */ +bool idpf_sideband_action_ena(struct idpf_vport *vport, + struct ethtool_rx_flow_spec *fsp) +{ + struct virtchnl2_create_vport *vport_msg; + unsigned int supp_actions; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + supp_actions = le32_to_cpu(vport_msg->sideband_flow_actions); + + /* Actions Drop/Wake are not supported */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC || + fsp->ring_cookie == RX_CLS_FLOW_WAKE) + return false; + + return !!(supp_actions & VIRTCHNL2_ACTION_QUEUE); +} + +unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport) +{ + struct virtchnl2_create_vport *vport_msg; + + vport_msg = vport->adapter->vport_params_recvd[vport->idx]; + return le32_to_cpu(vport_msg->flow_steer_max_rules); +} + +/** * idpf_get_vport_id: Get vport id * @vport: virtual port structure * @@ -3737,3 +3986,42 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter, return reply_sz < 0 ? reply_sz : 0; } + +/** + * idpf_idc_rdma_vc_send_sync - virtchnl send callback for IDC registered drivers + * @cdev_info: IDC core device info pointer + * @send_msg: message to send + * @msg_size: size of message to send + * @recv_msg: message to populate on reception of response + * @recv_len: length of message copied into recv_msg or 0 on error + * + * Return: 0 on success or error code on failure. 
+ */ +int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, + u8 *send_msg, u16 msg_size, + u8 *recv_msg, u16 *recv_len) +{ + struct idpf_adapter *adapter = pci_get_drvdata(cdev_info->pdev); + struct idpf_vc_xn_params xn_params = { }; + ssize_t reply_sz; + u16 recv_size; + + if (!recv_msg || !recv_len || msg_size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; + + recv_size = min_t(u16, *recv_len, IDPF_CTLQ_MAX_BUF_LEN); + *recv_len = 0; + xn_params.vc_op = VIRTCHNL2_OP_RDMA; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = send_msg; + xn_params.send_buf.iov_len = msg_size; + xn_params.recv_buf.iov_base = recv_msg; + xn_params.recv_buf.iov_len = recv_size; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + *recv_len = reply_sz; + + return 0; +} +EXPORT_SYMBOL_GPL(idpf_idc_rdma_vc_send_sync); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h index 83da5d8da56b..86f30f0db07a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -4,6 +4,88 @@ #ifndef _IDPF_VIRTCHNL_H_ #define _IDPF_VIRTCHNL_H_ +#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 +#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) +#define IDPF_VC_XN_IDX_M GENMASK(7, 0) +#define IDPF_VC_XN_SALT_M GENMASK(15, 8) +#define IDPF_VC_XN_RING_LEN U8_MAX + +/** + * enum idpf_vc_xn_state - Virtchnl transaction status + * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used + * @IDPF_VC_XN_WAITING: expecting a reply, not yet received + * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, buffer + * updated + * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there + * was an error, buffer not updated + * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down + * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the + * return context; a callback may be provided to handle + * return + */ +enum idpf_vc_xn_state { + IDPF_VC_XN_IDLE = 1, + IDPF_VC_XN_WAITING, + IDPF_VC_XN_COMPLETED_SUCCESS, + IDPF_VC_XN_COMPLETED_FAILED, + IDPF_VC_XN_SHUTDOWN, + IDPF_VC_XN_ASYNC, +}; + +struct idpf_vc_xn; +/* Callback for asynchronous messages */ +typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, + const struct idpf_ctlq_msg *); + +/** + * struct idpf_vc_xn - Data structure representing virtchnl transactions + * @completed: virtchnl event loop uses that to signal when a reply is + * available, uses kernel completion API + * @state: virtchnl event loop stores the data below, protected by the + * completion's lock. + * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be + * truncated on its way to the receiver thread according to + * reply_buf.iov_len. + * @reply: Reference to the buffer(s) where the reply data should be written + * to. May be 0-length (then NULL address permitted) if the reply data + * should be ignored. 
+ * @async_handler: if sent asynchronously, a callback can be provided to handle + * the reply when it's received + * @vc_op: corresponding opcode sent with this transaction + * @idx: index used as retrieval on reply receive, used for cookie + * @salt: changed every message to make unique, used for cookie + */ +struct idpf_vc_xn { + struct completion completed; + enum idpf_vc_xn_state state; + size_t reply_sz; + struct kvec reply; + async_vc_cb async_handler; + u32 vc_op; + u8 idx; + u8 salt; +}; + +/** + * struct idpf_vc_xn_params - Parameters for executing transaction + * @send_buf: kvec for send buffer + * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length + * @timeout_ms: timeout to wait for reply + * @async: send message asynchronously, will not wait on completion + * @async_handler: If sent asynchronously, optional callback handler. The user + * must be careful when using async handlers as the memory for + * the recv_buf _cannot_ be on stack if this is async. + * @vc_op: virtchnl op to send + */ +struct idpf_vc_xn_params { + struct kvec send_buf; + struct kvec recv_buf; + int timeout_ms; + bool async; + async_vc_cb async_handler; + u32 vc_op; +}; + struct idpf_adapter; struct idpf_netdev_priv; struct idpf_vec_regs; @@ -11,6 +93,8 @@ struct idpf_vport; struct idpf_vport_max_q; struct idpf_vport_user_config_data; +ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params); int idpf_init_dflt_mbx(struct idpf_adapter *adapter); void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); int idpf_vc_core_init(struct idpf_adapter *adapter); @@ -21,6 +105,12 @@ int idpf_get_reg_intr_vecs(struct idpf_vport *vport, int idpf_queue_reg_init(struct idpf_vport *vport); int idpf_vport_queue_ids_init(struct idpf_vport *vport); +bool idpf_vport_is_cap_ena(struct idpf_vport *vport, u16 flag); +bool idpf_sideband_flow_type_ena(struct idpf_vport *vport, u32 flow_type); +bool idpf_sideband_action_ena(struct idpf_vport *vport, + struct ethtool_rx_flow_spec *fsp); +unsigned int idpf_fsteer_max_rules(struct idpf_vport *vport); + int idpf_recv_mb_msg(struct idpf_adapter *adapter); int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, u16 msg_size, u8 *msg, u16 cookie); @@ -66,5 +156,9 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport); int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); +void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr); +int idpf_idc_rdma_vc_send_sync(struct iidc_rdma_core_dev_info *cdev_info, + u8 *send_msg, u16 msg_size, + u8 *recv_msg, u16 *recv_len); #endif /* _IDPF_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c new file mode 100644 index 000000000000..4f1fb0cefe51 --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c @@ -0,0 +1,668 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include "idpf.h" +#include "idpf_ptp.h" +#include "idpf_virtchnl.h" + +/** + * idpf_ptp_get_caps - Send virtchnl get ptp capabilities message + * @adapter: Driver specific private structure + * + * Send virtchnl get PTP capabilities message. + * + * Return: 0 on success, -errno on failure. 
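The cookie referred to by @idx and @salt above is a 16-bit value split by IDPF_VC_XN_IDX_M (bits 7:0) and IDPF_VC_XN_SALT_M (bits 15:8): the index routes a reply back to its transaction slot, and the per-message salt lets a reply that belongs to an older use of the same slot be detected. A standalone sketch of that pack/match step, with hypothetical names:

#include <stdint.h>

static uint16_t xn_cookie(uint8_t idx, uint8_t salt)
{
	return (uint16_t)(((uint16_t)salt << 8) | idx);	/* salt in 15:8, idx in 7:0 */
}

static int xn_cookie_matches(uint16_t cookie, uint8_t idx, uint8_t salt)
{
	return (uint8_t)cookie == idx && (uint8_t)(cookie >> 8) == salt;
}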
+ */ +int idpf_ptp_get_caps(struct idpf_adapter *adapter) +{ + struct virtchnl2_ptp_get_caps *recv_ptp_caps_msg __free(kfree) = NULL; + struct virtchnl2_ptp_get_caps send_ptp_caps_msg = { + .caps = cpu_to_le32(VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME | + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB | + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME | + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB | + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB | + VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB) + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_CAPS, + .send_buf.iov_base = &send_ptp_caps_msg, + .send_buf.iov_len = sizeof(send_ptp_caps_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + struct virtchnl2_ptp_cross_time_reg_offsets cross_tstamp_offsets; + struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets; + struct virtchnl2_ptp_clk_reg_offsets clock_offsets; + struct idpf_ptp_secondary_mbx *scnd_mbx; + struct idpf_ptp *ptp = adapter->ptp; + enum idpf_ptp_access access_type; + u32 temp_offset; + int reply_sz; + + recv_ptp_caps_msg = kzalloc(sizeof(struct virtchnl2_ptp_get_caps), + GFP_KERNEL); + if (!recv_ptp_caps_msg) + return -ENOMEM; + + xn_params.recv_buf.iov_base = recv_ptp_caps_msg; + xn_params.recv_buf.iov_len = sizeof(*recv_ptp_caps_msg); + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + else if (reply_sz != sizeof(*recv_ptp_caps_msg)) + return -EIO; + + ptp->caps = le32_to_cpu(recv_ptp_caps_msg->caps); + ptp->base_incval = le64_to_cpu(recv_ptp_caps_msg->base_incval); + ptp->max_adj = le32_to_cpu(recv_ptp_caps_msg->max_adj); + + scnd_mbx = &ptp->secondary_mbx; + scnd_mbx->peer_mbx_q_id = le16_to_cpu(recv_ptp_caps_msg->peer_mbx_q_id); + + /* if the ptp_mb_q_id holds invalid value (0xffff), the secondary + * mailbox is not supported. 
+ */ + scnd_mbx->valid = scnd_mbx->peer_mbx_q_id != 0xffff; + if (scnd_mbx->valid) + scnd_mbx->peer_id = recv_ptp_caps_msg->peer_id; + + /* Determine the access type for the PTP features */ + idpf_ptp_get_features_access(adapter); + + access_type = ptp->get_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + goto cross_tstamp; + + clock_offsets = recv_ptp_caps_msg->clk_offsets; + + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_l); + ptp->dev_clk_regs.dev_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.dev_clk_ns_h); + ptp->dev_clk_regs.dev_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_l); + ptp->dev_clk_regs.phy_clk_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.phy_clk_ns_h); + ptp->dev_clk_regs.phy_clk_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clock_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + +cross_tstamp: + access_type = ptp->get_cross_tstamp_access; + if (access_type != IDPF_PTP_DIRECT) + goto discipline_clock; + + cross_tstamp_offsets = recv_ptp_caps_msg->cross_time_offsets; + + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_l); + ptp->dev_clk_regs.sys_time_ns_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.sys_time_ns_h); + ptp->dev_clk_regs.sys_time_ns_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(cross_tstamp_offsets.cmd_sync_trigger); + ptp->dev_clk_regs.cmd_sync = idpf_get_reg_addr(adapter, temp_offset); + +discipline_clock: + access_type = ptp->adj_dev_clk_time_access; + if (access_type != IDPF_PTP_DIRECT) + return 0; + + clk_adj_offsets = recv_ptp_caps_msg->clk_adj_offsets; + + /* Device clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_cmd_type); + ptp->dev_clk_regs.cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_l); + ptp->dev_clk_regs.incval_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_incval_h); + ptp->dev_clk_regs.incval_h = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_l); + ptp->dev_clk_regs.shadj_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.dev_clk_shadj_h); + ptp->dev_clk_regs.shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + /* PHY clock offsets */ + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_cmd_type); + ptp->dev_clk_regs.phy_cmd = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_l); + ptp->dev_clk_regs.phy_incval_l = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_incval_h); + ptp->dev_clk_regs.phy_incval_h = idpf_get_reg_addr(adapter, + temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_l); + ptp->dev_clk_regs.phy_shadj_l = idpf_get_reg_addr(adapter, temp_offset); + temp_offset = le32_to_cpu(clk_adj_offsets.phy_clk_shadj_h); + ptp->dev_clk_regs.phy_shadj_h = idpf_get_reg_addr(adapter, temp_offset); + + return 0; +} + +/** + * idpf_ptp_get_dev_clk_time - Send virtchnl get device clk time message + * @adapter: Driver specific private structure + * @dev_clk_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get time message to get the 
time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_dev_clk_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *dev_clk_time) +{ + struct virtchnl2_ptp_get_dev_clk_time get_dev_clk_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME, + .send_buf.iov_base = &get_dev_clk_time_msg, + .send_buf.iov_len = sizeof(get_dev_clk_time_msg), + .recv_buf.iov_base = &get_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(get_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + u64 dev_time; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(get_dev_clk_time_msg)) + return -EIO; + + dev_time = le64_to_cpu(get_dev_clk_time_msg.dev_time_ns); + dev_clk_time->dev_clk_time_ns = dev_time; + + return 0; +} + +/** + * idpf_ptp_get_cross_time - Send virtchnl get cross time message + * @adapter: Driver specific private structure + * @cross_time: Pointer to the device clock structure where the value is set + * + * Send virtchnl get cross time message to get the time of the clock and the + * system time. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_get_cross_time(struct idpf_adapter *adapter, + struct idpf_ptp_dev_timers *cross_time) +{ + struct virtchnl2_ptp_get_cross_time cross_time_msg; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_CROSS_TIME, + .send_buf.iov_base = &cross_time_msg, + .send_buf.iov_len = sizeof(cross_time_msg), + .recv_buf.iov_base = &cross_time_msg, + .recv_buf.iov_len = sizeof(cross_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(cross_time_msg)) + return -EIO; + + cross_time->dev_clk_time_ns = le64_to_cpu(cross_time_msg.dev_time_ns); + cross_time->sys_time_ns = le64_to_cpu(cross_time_msg.sys_time_ns); + + return 0; +} + +/** + * idpf_ptp_set_dev_clk_time - Send virtchnl set device time message + * @adapter: Driver specific private structure + * @time: New time value + * + * Send virtchnl set time message to set the time of the clock. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_set_dev_clk_time(struct idpf_adapter *adapter, u64 time) +{ + struct virtchnl2_ptp_set_dev_clk_time set_dev_clk_time_msg = { + .dev_time_ns = cpu_to_le64(time), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME, + .send_buf.iov_base = &set_dev_clk_time_msg, + .send_buf.iov_len = sizeof(set_dev_clk_time_msg), + .recv_buf.iov_base = &set_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(set_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(set_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_time - Send virtchnl adj device clock time message + * @adapter: Driver specific private structure + * @delta: Offset in nanoseconds to adjust the time by + * + * Send virtchnl adj time message to adjust the clock by the indicated delta. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int idpf_ptp_adj_dev_clk_time(struct idpf_adapter *adapter, s64 delta) +{ + struct virtchnl2_ptp_adj_dev_clk_time adj_dev_clk_time_msg = { + .delta = cpu_to_le64(delta), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME, + .send_buf.iov_base = &adj_dev_clk_time_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .recv_buf.iov_base = &adj_dev_clk_time_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_time_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_time_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_adj_dev_clk_fine - Send virtchnl adj time message + * @adapter: Driver specific private structure + * @incval: Source timer increment value per clock cycle + * + * Send virtchnl adj fine message to adjust the frequency of the clock by + * incval. + * + * Return: 0 on success, -errno otherwise. + */ +int idpf_ptp_adj_dev_clk_fine(struct idpf_adapter *adapter, u64 incval) +{ + struct virtchnl2_ptp_adj_dev_clk_fine adj_dev_clk_fine_msg = { + .incval = cpu_to_le64(incval), + }; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE, + .send_buf.iov_base = &adj_dev_clk_fine_msg, + .send_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .recv_buf.iov_base = &adj_dev_clk_fine_msg, + .recv_buf.iov_len = sizeof(adj_dev_clk_fine_msg), + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + int reply_sz; + + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz != sizeof(adj_dev_clk_fine_msg)) + return -EIO; + + return 0; +} + +/** + * idpf_ptp_get_vport_tstamps_caps - Send virtchnl to get tstamps caps for vport + * @vport: Virtual port structure + * + * Send virtchnl get vport tstamps caps message to receive the set of tstamp + * capabilities per vport. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int idpf_ptp_get_vport_tstamps_caps(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_caps send_tx_tstamp_caps; + struct virtchnl2_ptp_get_vport_tx_tstamp_caps *rcv_tx_tstamp_caps; + struct virtchnl2_ptp_tx_tstamp_latch_caps tx_tstamp_latch_caps; + struct idpf_ptp_vport_tx_tstamp_caps *tstamp_caps; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp, *tmp; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS, + .send_buf.iov_base = &send_tx_tstamp_caps, + .send_buf.iov_len = sizeof(send_tx_tstamp_caps), + .recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + }; + enum idpf_ptp_access tstamp_access, get_dev_clk_access; + struct idpf_ptp *ptp = vport->adapter->ptp; + struct list_head *head; + int err = 0, reply_sz; + u16 num_latches; + u32 size; + + if (!ptp) + return -EOPNOTSUPP; + + tstamp_access = ptp->tx_tstamp_access; + get_dev_clk_access = ptp->get_dev_clk_time_access; + if (tstamp_access == IDPF_PTP_NONE || + get_dev_clk_access == IDPF_PTP_NONE) + return -EOPNOTSUPP; + + rcv_tx_tstamp_caps = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcv_tx_tstamp_caps) + return -ENOMEM; + + send_tx_tstamp_caps.vport_id = cpu_to_le32(vport->vport_id); + xn_params.recv_buf.iov_base = rcv_tx_tstamp_caps; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) { + err = reply_sz; + goto get_tstamp_caps_out; + } + + num_latches = le16_to_cpu(rcv_tx_tstamp_caps->num_latches); + size = struct_size(rcv_tx_tstamp_caps, tstamp_latches, num_latches); + if (reply_sz != size) { + err = -EIO; + goto get_tstamp_caps_out; + } + + size = struct_size(tstamp_caps, tx_tstamp_status, num_latches); + tstamp_caps = kzalloc(size, GFP_KERNEL); + if (!tstamp_caps) { + err = -ENOMEM; + goto get_tstamp_caps_out; + } + + tstamp_caps->access = true; + tstamp_caps->num_entries = num_latches; + + INIT_LIST_HEAD(&tstamp_caps->latches_in_use); + INIT_LIST_HEAD(&tstamp_caps->latches_free); + + spin_lock_init(&tstamp_caps->latches_lock); + spin_lock_init(&tstamp_caps->status_lock); + + tstamp_caps->tstamp_ns_lo_bit = rcv_tx_tstamp_caps->tstamp_ns_lo_bit; + + for (u16 i = 0; i < tstamp_caps->num_entries; i++) { + __le32 offset_l, offset_h; + + ptp_tx_tstamp = kzalloc(sizeof(*ptp_tx_tstamp), GFP_KERNEL); + if (!ptp_tx_tstamp) { + err = -ENOMEM; + goto err_free_ptp_tx_stamp_list; + } + + tx_tstamp_latch_caps = rcv_tx_tstamp_caps->tstamp_latches[i]; + + if (tstamp_access != IDPF_PTP_DIRECT) + goto skip_offsets; + + offset_l = tx_tstamp_latch_caps.tx_latch_reg_offset_l; + offset_h = tx_tstamp_latch_caps.tx_latch_reg_offset_h; + ptp_tx_tstamp->tx_latch_reg_offset_l = le32_to_cpu(offset_l); + ptp_tx_tstamp->tx_latch_reg_offset_h = le32_to_cpu(offset_h); + +skip_offsets: + ptp_tx_tstamp->idx = tx_tstamp_latch_caps.index; + + list_add(&ptp_tx_tstamp->list_member, + &tstamp_caps->latches_free); + + tstamp_caps->tx_tstamp_status[i].state = IDPF_PTP_FREE; + } + + vport->tx_tstamp_caps = tstamp_caps; + kfree(rcv_tx_tstamp_caps); + + return 0; + +err_free_ptp_tx_stamp_list: + head = &tstamp_caps->latches_free; + list_for_each_entry_safe(ptp_tx_tstamp, tmp, head, list_member) { + list_del(&ptp_tx_tstamp->list_member); + kfree(ptp_tx_tstamp); + } + + kfree(tstamp_caps); +get_tstamp_caps_out: + kfree(rcv_tx_tstamp_caps); + + return err; +} + +/** + * idpf_ptp_update_tstamp_tracker - Update the Tx timestamp tracker based on + * the skb compatibility. 
+ * @caps: Tx timestamp capabilities that monitor the latch status + * @skb: skb for which the tstamp value is returned through virtchnl message + * @current_state: Current state of the Tx timestamp latch + * @expected_state: Expected state of the Tx timestamp latch + * + * Find a proper skb tracker for which the Tx timestamp is received and change + * the state to expected value. + * + * Return: true if the tracker has been found and updated, false otherwise. + */ +static bool +idpf_ptp_update_tstamp_tracker(struct idpf_ptp_vport_tx_tstamp_caps *caps, + struct sk_buff *skb, + enum idpf_ptp_tx_tstamp_state current_state, + enum idpf_ptp_tx_tstamp_state expected_state) +{ + bool updated = false; + + spin_lock(&caps->status_lock); + for (u16 i = 0; i < caps->num_entries; i++) { + struct idpf_ptp_tx_tstamp_status *status; + + status = &caps->tx_tstamp_status[i]; + + if (skb == status->skb && status->state == current_state) { + status->state = expected_state; + updated = true; + break; + } + } + spin_unlock(&caps->status_lock); + + return updated; +} + +/** + * idpf_ptp_get_tstamp_value - Get the Tx timestamp value and provide it + * back to the skb. + * @vport: Virtual port structure + * @tstamp_latch: Tx timestamp latch structure fulfilled by the Control Plane + * @ptp_tx_tstamp: Tx timestamp latch to add to the free list + * + * Read the value of the Tx timestamp for a given latch received from the + * Control Plane, extend it to 64 bit and provide back to the skb. + * + * Return: 0 on success, -errno otherwise. + */ +static int +idpf_ptp_get_tstamp_value(struct idpf_vport *vport, + struct virtchnl2_ptp_tx_tstamp_latch *tstamp_latch, + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp) +{ + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct skb_shared_hwtstamps shhwtstamps; + bool state_upd = false; + u8 tstamp_ns_lo_bit; + u64 tstamp; + + tx_tstamp_caps = vport->tx_tstamp_caps; + tstamp_ns_lo_bit = tx_tstamp_caps->tstamp_ns_lo_bit; + + ptp_tx_tstamp->tstamp = le64_to_cpu(tstamp_latch->tstamp); + ptp_tx_tstamp->tstamp >>= tstamp_ns_lo_bit; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_READ_VALUE, + IDPF_PTP_FREE); + if (!state_upd) + return -EINVAL; + + tstamp = idpf_ptp_extend_ts(vport, ptp_tx_tstamp->tstamp); + shhwtstamps.hwtstamp = ns_to_ktime(tstamp); + skb_tstamp_tx(ptp_tx_tstamp->skb, &shhwtstamps); + consume_skb(ptp_tx_tstamp->skb); + + list_add(&ptp_tx_tstamp->list_member, + &tx_tstamp_caps->latches_free); + + return 0; +} + +/** + * idpf_ptp_get_tx_tstamp_async_handler - Async callback for getting Tx tstamps + * @adapter: Driver specific private structure + * @xn: transaction for message + * @ctlq_msg: received message + * + * Read the tstamps Tx tstamp values from a received message and put them + * directly to the skb. The number of timestamps to read is specified by + * the virtchnl message. + * + * Return: 0 on success, -errno otherwise. 
+ */ +static int +idpf_ptp_get_tx_tstamp_async_handler(struct idpf_adapter *adapter, + struct idpf_vc_xn *xn, + const struct idpf_ctlq_msg *ctlq_msg) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *recv_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct virtchnl2_ptp_tx_tstamp_latch tstamp_latch; + struct idpf_ptp_tx_tstamp *tx_tstamp, *tmp; + struct idpf_vport *tstamp_vport = NULL; + struct list_head *head; + u16 num_latches; + u32 vport_id; + int err = 0; + + recv_tx_tstamp_msg = ctlq_msg->ctx.indirect.payload->va; + vport_id = le32_to_cpu(recv_tx_tstamp_msg->vport_id); + + idpf_for_each_vport(adapter, vport) { + if (!vport) + continue; + + if (vport->vport_id == vport_id) { + tstamp_vport = vport; + break; + } + } + + if (!tstamp_vport || !tstamp_vport->tx_tstamp_caps) + return -EINVAL; + + tx_tstamp_caps = tstamp_vport->tx_tstamp_caps; + num_latches = le16_to_cpu(recv_tx_tstamp_msg->num_latches); + + spin_lock_bh(&tx_tstamp_caps->latches_lock); + head = &tx_tstamp_caps->latches_in_use; + + for (u16 i = 0; i < num_latches; i++) { + tstamp_latch = recv_tx_tstamp_msg->tstamp_latches[i]; + + if (!tstamp_latch.valid) + continue; + + if (list_empty(head)) { + err = -ENOBUFS; + goto unlock; + } + + list_for_each_entry_safe(tx_tstamp, tmp, head, list_member) { + if (tstamp_latch.index == tx_tstamp->idx) { + list_del(&tx_tstamp->list_member); + err = idpf_ptp_get_tstamp_value(tstamp_vport, + &tstamp_latch, + tx_tstamp); + if (err) + goto unlock; + + break; + } + } + } + +unlock: + spin_unlock_bh(&tx_tstamp_caps->latches_lock); + + return err; +} + +/** + * idpf_ptp_get_tx_tstamp - Send virtchnl get Tx timestamp latches message + * @vport: Virtual port structure + * + * Send virtchnl get Tx tstamp message to read the value of the HW timestamp. + * The message contains a list of indexes set in the Tx descriptors. + * + * Return: 0 on success, -errno otherwise. 
+ */ +int idpf_ptp_get_tx_tstamp(struct idpf_vport *vport) +{ + struct virtchnl2_ptp_get_vport_tx_tstamp_latches *send_tx_tstamp_msg; + struct idpf_ptp_vport_tx_tstamp_caps *tx_tstamp_caps; + struct idpf_vc_xn_params xn_params = { + .vc_op = VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP, + .timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC, + .async = true, + .async_handler = idpf_ptp_get_tx_tstamp_async_handler, + }; + struct idpf_ptp_tx_tstamp *ptp_tx_tstamp; + int reply_sz, size, msg_size; + struct list_head *head; + bool state_upd; + u16 id = 0; + + tx_tstamp_caps = vport->tx_tstamp_caps; + head = &tx_tstamp_caps->latches_in_use; + + size = struct_size(send_tx_tstamp_msg, tstamp_latches, + tx_tstamp_caps->num_entries); + send_tx_tstamp_msg = kzalloc(size, GFP_KERNEL); + if (!send_tx_tstamp_msg) + return -ENOMEM; + + spin_lock_bh(&tx_tstamp_caps->latches_lock); + list_for_each_entry(ptp_tx_tstamp, head, list_member) { + u8 idx; + + state_upd = idpf_ptp_update_tstamp_tracker(tx_tstamp_caps, + ptp_tx_tstamp->skb, + IDPF_PTP_REQUEST, + IDPF_PTP_READ_VALUE); + if (!state_upd) + continue; + + idx = ptp_tx_tstamp->idx; + send_tx_tstamp_msg->tstamp_latches[id].index = idx; + id++; + } + spin_unlock_bh(&tx_tstamp_caps->latches_lock); + + msg_size = struct_size(send_tx_tstamp_msg, tstamp_latches, id); + send_tx_tstamp_msg->vport_id = cpu_to_le32(vport->vport_id); + send_tx_tstamp_msg->num_latches = cpu_to_le16(id); + xn_params.send_buf.iov_base = send_tx_tstamp_msg; + xn_params.send_buf.iov_len = msg_size; + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + kfree(send_tx_tstamp_msg); + + return min(reply_sz, 0); +} diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h index 63deb120359c..02ae447cc24a 100644 --- a/drivers/net/ethernet/intel/idpf/virtchnl2.h +++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h @@ -62,12 +62,28 @@ enum virtchnl2_op { VIRTCHNL2_OP_GET_PTYPE_INFO = 526, /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW. - * Opcodes 529, 530, 531, 532 and 533 are reserved. */ + VIRTCHNL2_OP_RDMA = 529, + /* Opcodes 530 through 533 are reserved. 
*/ VIRTCHNL2_OP_LOOPBACK = 534, VIRTCHNL2_OP_ADD_MAC_ADDR = 535, VIRTCHNL2_OP_DEL_MAC_ADDR = 536, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537, + + /* TimeSync opcodes */ + VIRTCHNL2_OP_PTP_GET_CAPS = 541, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP = 542, + VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME = 543, + VIRTCHNL2_OP_PTP_GET_CROSS_TIME = 544, + VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME = 545, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE = 546, + VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME = 547, + VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS = 548, + VIRTCHNL2_OP_GET_LAN_MEMORY_REGIONS = 549, + /* Opcode 550 is reserved */ + VIRTCHNL2_OP_ADD_FLOW_RULE = 551, + VIRTCHNL2_OP_GET_FLOW_RULE = 552, + VIRTCHNL2_OP_DEL_FLOW_RULE = 553, }; /** @@ -141,22 +157,22 @@ enum virtchnl2_cap_seg { VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8), }; -/* Receive Side Scaling Flow type capability flags */ -enum virtchnl2_cap_rss { - VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0), - VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1), - VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2), - VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3), - VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4), - VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5), - VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6), - VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7), - VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8), - VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9), - VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10), - VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11), - VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12), - VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13), +/* Receive Side Scaling and Flow Steering Flow type capability flags */ +enum virtchnl2_flow_types { + VIRTCHNL2_FLOW_IPV4_TCP = BIT(0), + VIRTCHNL2_FLOW_IPV4_UDP = BIT(1), + VIRTCHNL2_FLOW_IPV4_SCTP = BIT(2), + VIRTCHNL2_FLOW_IPV4_OTHER = BIT(3), + VIRTCHNL2_FLOW_IPV6_TCP = BIT(4), + VIRTCHNL2_FLOW_IPV6_UDP = BIT(5), + VIRTCHNL2_FLOW_IPV6_SCTP = BIT(6), + VIRTCHNL2_FLOW_IPV6_OTHER = BIT(7), + VIRTCHNL2_FLOW_IPV4_AH = BIT(8), + VIRTCHNL2_FLOW_IPV4_ESP = BIT(9), + VIRTCHNL2_FLOW_IPV4_AH_ESP = BIT(10), + VIRTCHNL2_FLOW_IPV6_AH = BIT(11), + VIRTCHNL2_FLOW_IPV6_ESP = BIT(12), + VIRTCHNL2_FLOW_IPV6_AH_ESP = BIT(13), }; /* Header split capability flags */ @@ -182,8 +198,9 @@ enum virtchnl2_cap_other { VIRTCHNL2_CAP_RDMA = BIT_ULL(0), VIRTCHNL2_CAP_SRIOV = BIT_ULL(1), VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2), - VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3), - /* Queue based scheduling using split queue model */ + /* Other capability 3 is available + * Queue based scheduling using split queue model + */ VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4), VIRTCHNL2_CAP_CRC = BIT_ULL(5), VIRTCHNL2_CAP_ADQ = BIT_ULL(6), @@ -197,16 +214,37 @@ enum virtchnl2_cap_other { /* EDT: Earliest Departure Time capability used for Timing Wheel */ VIRTCHNL2_CAP_EDT = BIT_ULL(14), VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15), - VIRTCHNL2_CAP_FDIR = BIT_ULL(16), + /* Other capability 16 is available */ VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17), VIRTCHNL2_CAP_PTYPE = BIT_ULL(18), VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19), /* Other capability 20 is reserved */ + VIRTCHNL2_CAP_FLOW_STEER = BIT_ULL(21), + VIRTCHNL2_CAP_LAN_MEMORY_REGIONS = BIT_ULL(22), /* this must be the last capability */ VIRTCHNL2_CAP_OEM = BIT_ULL(63), }; +/** + * enum virtchnl2_action_types - Available actions for sideband flow steering + * @VIRTCHNL2_ACTION_DROP: Drop the packet + * @VIRTCHNL2_ACTION_PASSTHRU: Forward the packet to the next classifier/stage + * @VIRTCHNL2_ACTION_QUEUE: Forward the packet to a receive queue + * @VIRTCHNL2_ACTION_Q_GROUP: Forward the packet to a receive queue group + * @VIRTCHNL2_ACTION_MARK: Mark the packet with specific marker value 
+ * @VIRTCHNL2_ACTION_COUNT: Increment the corresponding counter + */ + +enum virtchnl2_action_types { + VIRTCHNL2_ACTION_DROP = BIT(0), + VIRTCHNL2_ACTION_PASSTHRU = BIT(1), + VIRTCHNL2_ACTION_QUEUE = BIT(2), + VIRTCHNL2_ACTION_Q_GROUP = BIT(3), + VIRTCHNL2_ACTION_MARK = BIT(4), + VIRTCHNL2_ACTION_COUNT = BIT(5), +}; + /* underlying device type */ enum virtchl2_device_type { VIRTCHNL2_MEV_DEVICE = 0, @@ -448,7 +486,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info); * @seg_caps: See enum virtchnl2_cap_seg. * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at. * @rsc_caps: See enum virtchnl2_cap_rsc. - * @rss_caps: See enum virtchnl2_cap_rss. + * @rss_caps: See enum virtchnl2_flow_types. * @other_caps: See enum virtchnl2_cap_other. * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox * provided by CP. @@ -473,6 +511,8 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info); * segment offload. * @max_hdr_buf_per_lso: Max number of header buffers that can be used for * an LSO. + * @num_rdma_allocated_vectors: Maximum number of allocated RDMA vectors for + * the device. * @pad1: Padding for future extensions. * * Dataplane driver sends this message to CP to negotiate capabilities and @@ -520,7 +560,8 @@ struct virtchnl2_get_capabilities { __le32 device_type; u8 min_sso_packet_len; u8 max_hdr_buf_per_lso; - u8 pad1[10]; + __le16 num_rdma_allocated_vectors; + u8 pad1[8]; }; VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities); @@ -560,6 +601,23 @@ struct virtchnl2_queue_reg_chunks { VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); /** + * enum virtchnl2_vport_flags - Vport flags that indicate vport capabilities. + * @VIRTCHNL2_VPORT_UPLINK_PORT: Representatives of underlying physical ports + * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER: Inline flow steering enabled + * @VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ: Inline flow steering enabled + * with explicit Rx queue action + * @VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER: Sideband flow steering enabled + * @VIRTCHNL2_VPORT_ENABLE_RDMA: RDMA is enabled for this vport + */ +enum virtchnl2_vport_flags { + VIRTCHNL2_VPORT_UPLINK_PORT = BIT(0), + VIRTCHNL2_VPORT_INLINE_FLOW_STEER = BIT(1), + VIRTCHNL2_VPORT_INLINE_FLOW_STEER_RXQ = BIT(2), + VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER = BIT(3), + VIRTCHNL2_VPORT_ENABLE_RDMA = BIT(4), +}; + +/** * struct virtchnl2_create_vport - Create vport config info. * @vport_type: See enum virtchnl2_vport_type. * @txq_model: See virtchnl2_queue_model. @@ -577,10 +635,18 @@ VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks); * @max_mtu: Max MTU. CP populates this field on response. * @vport_id: Vport id. CP populates this field on response. * @default_mac_addr: Default MAC address. - * @pad: Padding. + * @vport_flags: See enum virtchnl2_vport_flags. * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions. * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions. * @pad1: Padding. + * @inline_flow_caps: Bit mask of supported inline-flow-steering + * flow types (See enum virtchnl2_flow_types) + * @sideband_flow_caps: Bit mask of supported sideband-flow-steering + * flow types (See enum virtchnl2_flow_types) + * @sideband_flow_actions: Bit mask of supported action types + * for sideband flow steering (See enum virtchnl2_action_types) + * @flow_steer_max_rules: Max rules allowed for inline and sideband + * flow steering combined * @rss_algorithm: RSS algorithm. * @rss_key_size: RSS key size. * @rss_lut_size: RSS LUT size. 
@@ -610,10 +676,14 @@ struct virtchnl2_create_vport { __le16 max_mtu; __le32 vport_id; u8 default_mac_addr[ETH_ALEN]; - __le16 pad; + __le16 vport_flags; __le64 rx_desc_ids; __le64 tx_desc_ids; - u8 pad1[72]; + u8 pad1[48]; + __le64 inline_flow_caps; + __le64 sideband_flow_caps; + __le32 sideband_flow_actions; + __le32 flow_steer_max_rules; __le32 rss_algorithm; __le16 rss_key_size; __le16 rss_lut_size; @@ -1270,4 +1340,474 @@ struct virtchnl2_promisc_info { }; VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info); +/** + * enum virtchnl2_ptp_caps - PTP capabilities + * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME: direct access to get the time of + * device clock + * @VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB: mailbox access to get the time of + * device clock + * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME: direct access to cross timestamp + * @VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB: mailbox access to cross timestamp + * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME: direct access to set the time of + * device clock + * @VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB: mailbox access to set the time of + * device clock + * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK: direct access to adjust the time of device + * clock + * @VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB: mailbox access to adjust the time of + * device clock + * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS: direct access to the Tx timestamping + * @VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB: mailbox access to the Tx timestamping + * + * PF/VF negotiates a set of supported PTP capabilities with the Control Plane. + * There are two access methods - mailbox (_MB) and direct. + * PTP capabilities enables Main Timer operations: get/set/adjust Main Timer, + * cross timestamping and the Tx timestamping. + */ +enum virtchnl2_ptp_caps { + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME = BIT(0), + VIRTCHNL2_CAP_PTP_GET_DEVICE_CLK_TIME_MB = BIT(1), + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME = BIT(2), + VIRTCHNL2_CAP_PTP_GET_CROSS_TIME_MB = BIT(3), + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME = BIT(4), + VIRTCHNL2_CAP_PTP_SET_DEVICE_CLK_TIME_MB = BIT(5), + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK = BIT(6), + VIRTCHNL2_CAP_PTP_ADJ_DEVICE_CLK_MB = BIT(7), + VIRTCHNL2_CAP_PTP_TX_TSTAMPS = BIT(8), + VIRTCHNL2_CAP_PTP_TX_TSTAMPS_MB = BIT(9), +}; + +/** + * struct virtchnl2_ptp_clk_reg_offsets - Offsets of device and PHY clocks + * registers. + * @dev_clk_ns_l: Device clock low register offset + * @dev_clk_ns_h: Device clock high register offset + * @phy_clk_ns_l: PHY clock low register offset + * @phy_clk_ns_h: PHY clock high register offset + * @cmd_sync_trigger: The command sync trigger register offset + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_clk_reg_offsets { + __le32 dev_clk_ns_l; + __le32 dev_clk_ns_h; + __le32 phy_clk_ns_l; + __le32 phy_clk_ns_h; + __le32 cmd_sync_trigger; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_ptp_clk_reg_offsets); + +/** + * struct virtchnl2_ptp_cross_time_reg_offsets - Offsets of the device cross + * time registers. + * @sys_time_ns_l: System time low register offset + * @sys_time_ns_h: System time high register offset + * @cmd_sync_trigger: The command sync trigger register offset + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_cross_time_reg_offsets { + __le32 sys_time_ns_l; + __le32 sys_time_ns_h; + __le32 cmd_sync_trigger; + u8 pad[4]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_cross_time_reg_offsets); + +/** + * struct virtchnl2_ptp_clk_adj_reg_offsets - Offsets of device and PHY clocks + * adjustments registers. 
+ * @dev_clk_cmd_type: Device clock command type register offset + * @dev_clk_incval_l: Device clock increment value low register offset + * @dev_clk_incval_h: Device clock increment value high register offset + * @dev_clk_shadj_l: Device clock shadow adjust low register offset + * @dev_clk_shadj_h: Device clock shadow adjust high register offset + * @phy_clk_cmd_type: PHY timer command type register offset + * @phy_clk_incval_l: PHY timer increment value low register offset + * @phy_clk_incval_h: PHY timer increment value high register offset + * @phy_clk_shadj_l: PHY timer shadow adjust low register offset + * @phy_clk_shadj_h: PHY timer shadow adjust high register offset + */ +struct virtchnl2_ptp_clk_adj_reg_offsets { + __le32 dev_clk_cmd_type; + __le32 dev_clk_incval_l; + __le32 dev_clk_incval_h; + __le32 dev_clk_shadj_l; + __le32 dev_clk_shadj_h; + __le32 phy_clk_cmd_type; + __le32 phy_clk_incval_l; + __le32 phy_clk_incval_h; + __le32 phy_clk_shadj_l; + __le32 phy_clk_shadj_h; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(40, virtchnl2_ptp_clk_adj_reg_offsets); + +/** + * struct virtchnl2_ptp_tx_tstamp_latch_caps - PTP Tx timestamp latch + * capabilities. + * @tx_latch_reg_offset_l: Tx timestamp latch low register offset + * @tx_latch_reg_offset_h: Tx timestamp latch high register offset + * @index: Latch index provided to the Tx descriptor + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_tx_tstamp_latch_caps { + __le32 tx_latch_reg_offset_l; + __le32 tx_latch_reg_offset_h; + u8 index; + u8 pad[7]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch_caps); + +/** + * struct virtchnl2_ptp_get_vport_tx_tstamp_caps - Structure that defines Tx + * tstamp entries. + * @vport_id: Vport number + * @num_latches: Total number of latches + * @tstamp_ns_lo_bit: First bit for nanosecond part of the timestamp + * @tstamp_ns_hi_bit: Last bit for nanosecond part of the timestamp + * @pad: Padding for future tstamp granularity extensions + * @tstamp_latches: Capabilities of Tx timestamp entries + * + * PF/VF sends this message to negotiate the Tx timestamp latches for each + * Vport. + * + * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP_CAPS. + */ +struct virtchnl2_ptp_get_vport_tx_tstamp_caps { + __le32 vport_id; + __le16 num_latches; + u8 tstamp_ns_lo_bit; + u8 tstamp_ns_hi_bit; + u8 pad[8]; + + struct virtchnl2_ptp_tx_tstamp_latch_caps tstamp_latches[] + __counted_by_le(num_latches); +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_caps); + +/** + * struct virtchnl2_ptp_get_caps - Get PTP capabilities + * @caps: PTP capability bitmap. See enum virtchnl2_ptp_caps + * @max_adj: The maximum possible frequency adjustment + * @base_incval: The default timer increment value + * @peer_mbx_q_id: ID of the PTP Device Control daemon queue + * @peer_id: Peer ID for PTP Device Control daemon + * @secondary_mbx: Indicates to the driver that it should create a secondary + * mailbox to interact with the control plane for PTP + * @pad: Padding for future extensions + * @clk_offsets: Main timer and PHY register offsets + * @cross_time_offsets: Cross time register offsets + * @clk_adj_offsets: Offsets needed to adjust the PHY and the main timer + * + * PF/VF sends this message to negotiate PTP capabilities. CP updates the + * bitmap with supported features and fills in the appropriate structures. + * If HW uses primary MBX for PTP: secondary_mbx is set to false. + * If HW uses secondary MBX for PTP: secondary_mbx is set to true.
+ * The Control Plane has 2 MBXs while the driver has 1 MBX; send-to-peer-driver + * may be used to send a message using a valid ptp_peer_mb_q_id and + * ptp_peer_id. + * If HW does not use send-to-peer-driver: secondary_mbx is a don't-care field + * and peer_mbx_q_id holds the invalid value (0xFFFF). + * + * Associated with VIRTCHNL2_OP_PTP_GET_CAPS. + */ +struct virtchnl2_ptp_get_caps { + __le32 caps; + __le32 max_adj; + __le64 base_incval; + __le16 peer_mbx_q_id; + u8 peer_id; + u8 secondary_mbx; + u8 pad[4]; + + struct virtchnl2_ptp_clk_reg_offsets clk_offsets; + struct virtchnl2_ptp_cross_time_reg_offsets cross_time_offsets; + struct virtchnl2_ptp_clk_adj_reg_offsets clk_adj_offsets; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(104, virtchnl2_ptp_get_caps); + +/** + * struct virtchnl2_ptp_tx_tstamp_latch - Structure that describes Tx tstamp + * values, index and validity. + * @tstamp: Timestamp value + * @index: Timestamp index from which the value is read + * @valid: Timestamp validity + * @pad: Padding for future extensions + */ +struct virtchnl2_ptp_tx_tstamp_latch { + __le64 tstamp; + u8 index; + u8 valid; + u8 pad[6]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_tx_tstamp_latch); + +/** + * struct virtchnl2_ptp_get_vport_tx_tstamp_latches - Tx timestamp latches + * associated with the vport. + * @vport_id: Number of vport that requests the timestamp + * @num_latches: Number of latches + * @get_devtime_with_txtstmp: Flag to request device time along with Tx timestamp + * @pad: Padding for future extensions + * @device_time: device time if get_devtime_with_txtstmp was set in request + * @tstamp_latches: PTP Tx timestamp latches + * + * PF/VF sends this message to receive a specified number of timestamp + * entries. + * + * Associated with VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP. + */ +struct virtchnl2_ptp_get_vport_tx_tstamp_latches { + __le32 vport_id; + __le16 num_latches; + u8 get_devtime_with_txtstmp; + u8 pad[1]; + __le64 device_time; + + struct virtchnl2_ptp_tx_tstamp_latch tstamp_latches[] + __counted_by_le(num_latches); +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_vport_tx_tstamp_latches); + +/** + * struct virtchnl2_ptp_get_dev_clk_time - Associated with message + * VIRTCHNL2_OP_PTP_GET_DEV_CLK_TIME. + * @dev_time_ns: Device clock time value in nanoseconds + * + * PF/VF sends this message to receive the time from the main timer. + */ +struct virtchnl2_ptp_get_dev_clk_time { + __le64 dev_time_ns; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_get_dev_clk_time); + +/** + * struct virtchnl2_ptp_get_cross_time: Associated with message + * VIRTCHNL2_OP_PTP_GET_CROSS_TIME. + * @sys_time_ns: System counter value expressed in nanoseconds, read + * synchronously with device time + * @dev_time_ns: Device clock time value expressed in nanoseconds + * + * PF/VF sends this message to receive the cross time. + */ +struct virtchnl2_ptp_get_cross_time { + __le64 sys_time_ns; + __le64 dev_time_ns; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_ptp_get_cross_time); + +/** + * struct virtchnl2_ptp_set_dev_clk_time: Associated with message + * VIRTCHNL2_OP_PTP_SET_DEV_CLK_TIME. + * @dev_time_ns: Device time value expressed in nanoseconds to set + * + * PF/VF sends this message to set the time of the main timer. + */ +struct virtchnl2_ptp_set_dev_clk_time { + __le64 dev_time_ns; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_set_dev_clk_time); + +/** + * struct virtchnl2_ptp_adj_dev_clk_fine: Associated with message + * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_FINE.
+ * @incval: Source timer increment value per clock cycle + * + * PF/VF sends this message to adjust the frequency of the main timer by the + * indicated increment value. + */ +struct virtchnl2_ptp_adj_dev_clk_fine { + __le64 incval; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_fine); + +/** + * struct virtchnl2_ptp_adj_dev_clk_time: Associated with message + * VIRTCHNL2_OP_PTP_ADJ_DEV_CLK_TIME. + * @delta: Offset in nanoseconds to adjust the time by + * + * PF/VF sends this message to adjust the time of the main timer by the delta. + */ +struct virtchnl2_ptp_adj_dev_clk_time { + __le64 delta; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_ptp_adj_dev_clk_time); + +/** + * struct virtchnl2_mem_region - MMIO memory region + * @start_offset: starting offset of the MMIO memory region + * @size: size of the MMIO memory region + */ +struct virtchnl2_mem_region { + __le64 start_offset; + __le64 size; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_mem_region); + +/** + * struct virtchnl2_get_lan_memory_regions - List of LAN MMIO memory regions + * @num_memory_regions: number of memory regions + * @pad: Padding + * @mem_reg: List with memory region info + * + * PF/VF sends this message to learn what LAN MMIO memory regions it should map. + */ +struct virtchnl2_get_lan_memory_regions { + __le16 num_memory_regions; + u8 pad[6]; + struct virtchnl2_mem_region mem_reg[]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_lan_memory_regions); + +#define VIRTCHNL2_MAX_NUM_PROTO_HDRS 4 +#define VIRTCHNL2_MAX_SIZE_RAW_PACKET 256 +#define VIRTCHNL2_MAX_NUM_ACTIONS 8 + +/** + * struct virtchnl2_proto_hdr - represent one protocol header + * @hdr_type: See enum virtchnl2_proto_hdr_type + * @pad: padding + * @buffer_spec: binary buffer based on header type. + * @buffer_mask: mask applied on buffer_spec. + * + * Structure to hold protocol headers based on hdr_type + */ +struct virtchnl2_proto_hdr { + __le32 hdr_type; + u8 pad[4]; + u8 buffer_spec[64]; + u8 buffer_mask[64]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(136, virtchnl2_proto_hdr); + +/** + * struct virtchnl2_proto_hdrs - struct to represent match criteria + * @tunnel_level: specify where protocol header(s) start from. + * must be 0 when sending a raw packet request. + * 0 - from the outer layer + * 1 - from the first inner layer + * 2 - from the second inner layer + * @pad: Padding bytes + * @count: total number of protocol headers in proto_hdr. 0 for raw packet. + * @proto_hdr: Array of protocol headers + * @raw: struct holding raw packet buffer when count is 0 + */ +struct virtchnl2_proto_hdrs { + u8 tunnel_level; + u8 pad[3]; + __le32 count; + union { + struct virtchnl2_proto_hdr + proto_hdr[VIRTCHNL2_MAX_NUM_PROTO_HDRS]; + struct { + __le16 pkt_len; + u8 spec[VIRTCHNL2_MAX_SIZE_RAW_PACKET]; + u8 mask[VIRTCHNL2_MAX_SIZE_RAW_PACKET]; + } raw; + }; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(552, virtchnl2_proto_hdrs); + +/** + * struct virtchnl2_rule_action - struct representing single action for a flow + * @action_type: see enum virtchnl2_action_types + * @act_conf: union representing action depending on action_type. + * @act_conf.q_id: queue id to redirect the packets to. + * @act_conf.q_grp_id: queue group id to redirect the packets to. + * @act_conf.ctr_id: used for count action. If input value 0xFFFFFFFF control + * plane assigns a new counter and returns the counter ID to + * the driver. If input value is not 0xFFFFFFFF then it must + * be an existing counter given to the driver for an earlier + * flow. Then this flow will share the counter. 
+ * @act_conf.mark_id: Value used to mark the packets. Used for mark action. + * @act_conf.reserved: Reserved for future use. + */ +struct virtchnl2_rule_action { + __le32 action_type; + union { + __le32 q_id; + __le32 q_grp_id; + __le32 ctr_id; + __le32 mark_id; + u8 reserved[8]; + } act_conf; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rule_action); + +/** + * struct virtchnl2_rule_action_set - struct representing multiple actions + * @count: number of valid actions in the action set of a rule + * @actions: array of struct virtchnl2_rule_action + */ +struct virtchnl2_rule_action_set { + /* action count must be less than VIRTCHNL2_MAX_NUM_ACTIONS */ + __le32 count; + struct virtchnl2_rule_action actions[VIRTCHNL2_MAX_NUM_ACTIONS]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(100, virtchnl2_rule_action_set); + +/** + * struct virtchnl2_flow_rule - represent one flow steering rule + * @proto_hdrs: array of protocol header buffers representing match criteria + * @action_set: series of actions to be applied for the given rule + * @priority: rule priority. + * @pad: padding for future extensions. + */ +struct virtchnl2_flow_rule { + struct virtchnl2_proto_hdrs proto_hdrs; + struct virtchnl2_rule_action_set action_set; + __le32 priority; + u8 pad[8]; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(664, virtchnl2_flow_rule); + +enum virtchnl2_flow_rule_status { + VIRTCHNL2_FLOW_RULE_SUCCESS = 1, + VIRTCHNL2_FLOW_RULE_NORESOURCE = 2, + VIRTCHNL2_FLOW_RULE_EXIST = 3, + VIRTCHNL2_FLOW_RULE_TIMEOUT = 4, + VIRTCHNL2_FLOW_RULE_FLOW_TYPE_NOT_SUPPORTED = 5, + VIRTCHNL2_FLOW_RULE_MATCH_KEY_NOT_SUPPORTED = 6, + VIRTCHNL2_FLOW_RULE_ACTION_NOT_SUPPORTED = 7, + VIRTCHNL2_FLOW_RULE_ACTION_COMBINATION_INVALID = 8, + VIRTCHNL2_FLOW_RULE_ACTION_DATA_INVALID = 9, + VIRTCHNL2_FLOW_RULE_NOT_ADDED = 10, +}; + +/** + * struct virtchnl2_flow_rule_info - structure representing a single flow rule + * @rule_id: rule_id associated with the flow_rule. + * @rule_cfg: structure representing the rule. + * @status: status of rule programming. See enum virtchnl2_flow_rule_status. + */ +struct virtchnl2_flow_rule_info { + __le32 rule_id; + struct virtchnl2_flow_rule rule_cfg; + __le32 status; +}; +VIRTCHNL2_CHECK_STRUCT_LEN(672, virtchnl2_flow_rule_info); + +/** + * struct virtchnl2_flow_rule_add_del - add/delete a flow steering rule + * @vport_id: vport id for which the rule is to be added or deleted. + * @count: Indicates number of rules to be added or deleted. + * @rule_info: Array of flow rules to be added or deleted. + * + * For VIRTCHNL2_OP_ADD_FLOW_RULE, rule_info contains a list of rules to be + * added. If rule_id is 0xFFFFFFFF, then the rule is programmed and not cached. + * + * For VIRTCHNL2_OP_DEL_FLOW_RULE, there are two possibilities. The structure + * can contain either an array of rule_ids or an array of match keys to be + * deleted. When match keys are used, the corresponding rule_ids must be + * 0xFFFFFFFF. + * + * The status member of each rule indicates the result. A maximum of 6 rules + * can be added or deleted using this method. The driver has to retry in case + * the ADD or DEL opcode fails; the CP does not retry on failure. + */ +struct virtchnl2_flow_rule_add_del { + __le32 vport_id; + __le32 count; + struct virtchnl2_flow_rule_info rule_info[] __counted_by_le(count); +}; +VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_flow_rule_add_del); + +#endif /* _VIRTCHNL_2_H_ */
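For readers unfamiliar with the variable-length virtchnl2 messages above, the following standalone C sketch mirrors two details of the Tx timestamp path: how the VIRTCHNL2_OP_PTP_GET_VPORT_TX_TSTAMP buffer is sized over the flexible tstamp_latches[] array (the kernel's struct_size()), and how a latched value is reduced to nanoseconds by shifting out the sub-nanosecond bits, as idpf_ptp_get_tstamp_value() does with tstamp_ns_lo_bit. It is illustrative only, not part of the patch; the struct and function names are hypothetical stand-ins and host-endian integers replace the __le* wire types.

/* Minimal sketch only; layout copied from
 * struct virtchnl2_ptp_get_vport_tx_tstamp_latches above, simplified types.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tstamp_latch {		/* simplified virtchnl2_ptp_tx_tstamp_latch, 16 bytes */
	uint64_t tstamp;
	uint8_t index;
	uint8_t valid;
	uint8_t pad[6];
};

struct tx_tstamp_msg {		/* simplified message header, 16 bytes like the kernel check */
	uint32_t vport_id;
	uint16_t num_latches;
	uint8_t get_devtime_with_txtstmp;
	uint8_t pad[1];
	uint64_t device_time;
	struct tstamp_latch tstamp_latches[];	/* flexible, counted by num_latches */
};

/* Equivalent of the kernel's struct_size(msg, tstamp_latches, n). */
static size_t tx_tstamp_msg_size(uint16_t n)
{
	return sizeof(struct tx_tstamp_msg) + (size_t)n * sizeof(struct tstamp_latch);
}

/* Drop the sub-nanosecond fraction, mirroring tstamp >>= tstamp_ns_lo_bit. */
static uint64_t latch_to_ns(uint64_t raw, uint8_t tstamp_ns_lo_bit)
{
	return raw >> tstamp_ns_lo_bit;
}

int main(void)
{
	/* e.g. 4 negotiated latches and 8 fractional bits below the ns part */
	printf("msg size for 4 latches: %zu bytes\n", tx_tstamp_msg_size(4));
	printf("latch 0x12345678 -> %llu ns\n",
	       (unsigned long long)latch_to_ns(0x12345678ULL, 8));
	return 0;
}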
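Similarly, a minimal sketch of how an action set for a sideband flow-steering rule could be populated: redirect matching packets to a receive queue and attach a counter, passing 0xFFFFFFFF as ctr_id so the Control Plane allocates a new counter and returns its ID, as described in the virtchnl2_rule_action kernel-doc. Again this is not driver code; the macro, struct, and helper names are hypothetical, and only the bit positions from enum virtchnl2_action_types, the 12/100-byte layouts, and the counter-allocation convention come from the definitions above.

/* Minimal sketch only, under the assumptions stated above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ACTION_QUEUE	(1u << 2)	/* VIRTCHNL2_ACTION_QUEUE */
#define ACTION_COUNT	(1u << 5)	/* VIRTCHNL2_ACTION_COUNT */
#define MAX_NUM_ACTIONS	8		/* VIRTCHNL2_MAX_NUM_ACTIONS */
#define NEW_COUNTER_ID	0xFFFFFFFFu	/* ask the CP to allocate a counter */

struct rule_action {			/* simplified virtchnl2_rule_action, 12 bytes */
	uint32_t action_type;
	union {
		uint32_t q_id;
		uint32_t q_grp_id;
		uint32_t ctr_id;
		uint32_t mark_id;
		uint8_t reserved[8];
	} act_conf;
};

struct rule_action_set {		/* simplified virtchnl2_rule_action_set, 100 bytes */
	uint32_t count;			/* number of valid entries, below MAX_NUM_ACTIONS */
	struct rule_action actions[MAX_NUM_ACTIONS];
};

/* Redirect matching packets to rx_queue and count them with a new counter. */
static void build_queue_and_count_actions(struct rule_action_set *set,
					  uint32_t rx_queue)
{
	memset(set, 0, sizeof(*set));

	set->actions[0].action_type = ACTION_QUEUE;
	set->actions[0].act_conf.q_id = rx_queue;

	set->actions[1].action_type = ACTION_COUNT;
	set->actions[1].act_conf.ctr_id = NEW_COUNTER_ID;

	set->count = 2;
}

int main(void)
{
	struct rule_action_set set;

	build_queue_and_count_actions(&set, 3);
	printf("%u actions, first redirects to queue %u\n",
	       set.count, set.actions[0].act_conf.q_id);
	return 0;
}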