author    Yuval Mintz <Yuval.Mintz@qlogic.com>    2016-05-11 16:36:13 +0300
committer David S. Miller <davem@davemloft.net>   2016-05-12 00:04:07 -0400
commit    37bff2b9c6addf6216c8d04e95be596678e8deff (patch)
tree      1d2488426fee5eab5a30fc4683e359c6e9f2c07b /drivers/net/ethernet/qlogic/qed/qed_sriov.h
parent    32a47e72c9eb17e3b1bb507184e788b10d69ad4b (diff)
qed: Add VF->PF channel infrastructure
Communication between the VF and the PF is based on a dedicated HW channel: the VF prepares a message and, by signaling the HW, the PF gets a notification that the message exists. The PF then copies the message, processes it and DMAs an answer back to the VF as a response. The messages themselves are TLV-based, allowing easier backward/forward compatibility.

This patch adds the infrastructure of the channel on the PF side, starting with the arrival of the notification and ending with DMAing the response back to the VF. It also adds a dummy response as a reference; it only lays the groundwork of the communication and doesn't add support for any actual messages.

Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
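For reference, below is a minimal standalone C sketch of the TLV-based request/response flow described above. The struct layouts, helper names (channel_tlv, first_tlv, add_tlv, build_dummy_reply) and status values are illustrative assumptions only and do not reproduce the actual qed definitions; they merely mirror the idea of the PF composing a reply TLV chain in a local buffer through a running offset cursor before DMAing it back to the VF's reply address.

/*
 * Minimal standalone sketch (not driver code): layouts, names and status
 * values below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Every message element starts with a type/length header. */
struct channel_tlv {
	uint16_t type;
	uint16_t length;
};

/* Assumed "first TLV" at the start of every VF request; carries the VF
 * buffer address the PF would DMA its reply into. */
struct first_tlv {
	struct channel_tlv tl;
	uint32_t padding;
	uint64_t reply_address;
};

/* Assumed status/terminator values. */
enum {
	PFVF_STATUS_NOT_SUPPORTED = 1,
	CHANNEL_TLV_LIST_END = 0xffff,
};

/* Place a TLV header at *offset and advance the cursor, the way the PF
 * composes a reply chain in its local buffer. */
static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;
	*offset += length;
	return tl;
}

/* Build a dummy reply for a request type that has no real handler yet. */
static size_t build_dummy_reply(uint8_t *reply_buf, const struct first_tlv *req)
{
	uint8_t *offset = reply_buf;
	struct dummy_resp {
		struct channel_tlv tl;
		uint8_t status;
		uint8_t pad[3];
	} *resp;

	resp = add_tlv(&offset, req->tl.type, sizeof(*resp));
	resp->status = PFVF_STATUS_NOT_SUPPORTED;

	/* Terminate the reply TLV chain. */
	add_tlv(&offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_tlv));

	/* A real PF would now DMA reply_buf to req->reply_address and
	 * signal the VF that the response is ready. */
	return (size_t)(offset - reply_buf);
}

int main(void)
{
	struct first_tlv req = { .tl = { .type = 1, .length = sizeof(req) } };
	union { uint8_t bytes[64]; uint64_t align; } reply;

	memset(&reply, 0, sizeof(reply));
	printf("dummy reply of %zu bytes for request type %u\n",
	       build_dummy_reply(reply.bytes, &req), (unsigned)req.tl.type);
	return 0;
}

In the actual patch, the PF-side bookkeeping for this flow lives in struct qed_iov_vf_mbx extended below (pending_req, the offset cursor and the saved first_tlv of the request).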
Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_sriov.h')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h  53
1 file changed, 53 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index 0ac561916e2c..112216812a12 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -50,6 +50,14 @@ struct qed_iov_vf_mbx {
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
};
enum vf_state {
@@ -96,6 +104,14 @@ struct qed_pf_iov {
u32 bulletins_size;
};
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+};
+
#ifdef CONFIG_QED_SRIOV
/**
* @brief - Given a VF index, return index of next [including that] active VF.
@@ -147,6 +163,22 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
* @param cdev
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
@@ -175,6 +207,27 @@ static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo, union event_ring_data *data)
+{
+ return -EINVAL;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \