author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-15 14:54:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-15 14:54:53 -0800
commit     ad0835a93008e5901415a0a27847d6a27649aa3a (patch)
tree       e48be396ebfbb4f1fb02e7ca76461bdb1427490d /include/rdma
parent     22714a2ba4b55737cd7d5299db7aaf1fa8287354 (diff)
parent     4190b4e96954e2c3597021d29435c3f8db8d3129 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford:
 "This is a fairly plain pull request. Lots of driver updates across the stack, a huge number of static analysis cleanups including a close to 50 patch series from Bart Van Assche, and a number of new features inside the stack such as general CQ moderation support.

  Nothing really stands out, but there might be a few conflicts as you take things in. In particular, the cleanups touched some of the same lines as the new timer_setup changes.

  Everything in this pull request has been through 0day and at least two days of linux-next (since Stephen doesn't necessarily flag new errors/warnings until day2). A few more items (about 30 patches) from Intel and Mellanox showed up on the list on Tuesday. I've excluded those from this pull request, and I'm sure some of them qualify as fixes suitable to send any time, but I still have to review them fully. If they contain mostly fixes and little or no new development, then I will probably send them through by the end of the week just to get them out of the way.

  There was a break in my acceptance of patches which coincides with the computer problems I had, and then when I got things mostly back under control I had a backlog of patches to process, which I did mostly last Friday and Monday. So there is a larger number of patches processed in that timeframe than I was striving for.

  Summary:

   - Add iWARP support to qedr driver
   - Lots of misc fixes across subsystem
   - Multiple update series to hns roce driver
   - Multiple update series to hfi1 driver
   - Updates to vnic driver
   - Add kref to wait struct in cxgb4 driver
   - Updates to i40iw driver
   - Mellanox shared pull request
   - timer_setup changes
   - massive cleanup series from Bart Van Assche
   - Two series of SRP/SRPT changes from Bart Van Assche
   - Core updates from Mellanox
   - i40iw updates
   - IPoIB updates
   - mlx5 updates
   - mlx4 updates
   - hns updates
   - bnxt_re fixes
   - PCI write padding support
   - Sparse/Smatch/warning cleanups/fixes
   - CQ moderation support
   - SRQ support in vmw_pvrdma"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (296 commits)
  RDMA/core: Rename kernel modify_cq to better describe its usage
  IB/mlx5: Add CQ moderation capability to query_device
  IB/mlx4: Add CQ moderation capability to query_device
  IB/uverbs: Add CQ moderation capability to query_device
  IB/mlx5: Exposing modify CQ callback to uverbs layer
  IB/mlx4: Exposing modify CQ callback to uverbs layer
  IB/uverbs: Allow CQ moderation with modify CQ
  iw_cxgb4: atomically flush the qp
  iw_cxgb4: only call the cq comp_handler when the cq is armed
  iw_cxgb4: Fix possible circular dependency locking warning
  RDMA/bnxt_re: report vlan_id and sl in qp1 recv completion
  IB/core: Only maintain real QPs in the security lists
  IB/ocrdma_hw: remove unnecessary code in ocrdma_mbx_dealloc_lkey
  RDMA/core: Make function rdma_copy_addr return void
  RDMA/vmw_pvrdma: Add shared receive queue support
  RDMA/core: avoid uninitialized variable warning in create_udata
  RDMA/bnxt_re: synchronize poll_cq and req_notify_cq verbs
  RDMA/bnxt_re: Flush CQ notification Work Queue before destroying QP
  RDMA/bnxt_re: Set QP state in case of response completion errors
  RDMA/bnxt_re: Add memory barriers when processing CQ/EQ entries
  ...
Diffstat (limited to 'include/rdma')
-rw-r--r--   include/rdma/ib_addr.h       16
-rw-r--r--   include/rdma/ib_pack.h       19
-rw-r--r--   include/rdma/ib_sa.h         12
-rw-r--r--   include/rdma/ib_umem_odp.h    4
-rw-r--r--   include/rdma/ib_verbs.h      35
-rw-r--r--   include/rdma/opa_addr.h       6
-rw-r--r--   include/rdma/rdmavt_qp.h      6
7 files changed, 63 insertions, 35 deletions
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ec5008cf5d51..18c564f60e93 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -125,8 +125,9 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
void rdma_addr_cancel(struct rdma_dev_addr *addr);
-int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
- const unsigned char *dst_dev_addr);
+void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev,
+ const unsigned char *dst_dev_addr);
int rdma_addr_size(struct sockaddr *addr);
@@ -245,10 +246,11 @@ static inline void rdma_addr_set_dgid(struct rdma_dev_addr *dev_addr, union ib_g
static inline enum ib_mtu iboe_get_mtu(int mtu)
{
/*
- * reduce IB headers from effective IBoE MTU. 28 stands for
- * atomic header which is the biggest possible header after BTH
+ * Reduce IB headers from effective IBoE MTU.
*/
- mtu = mtu - IB_GRH_BYTES - IB_BTH_BYTES - 28;
+ mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES +
+ IB_EXT_XRC_BYTES + IB_EXT_ATOMICETH_BYTES +
+ IB_ICRC_BYTES);
if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096))
return IB_MTU_4096;
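For reference, a minimal sketch (not part of the patch) of what the new accounting works out to, using the constants added to ib_pack.h further down: GRH(40) + UDP(8) + BTH(12) + XRCETH(4) + AtomicETH(28) + ICRC(4) = 96 bytes are now subtracted instead of the previous 80. The function name iboe_mtu_example and the 1500/4200-byte MTU values are illustrative only:

    #include <rdma/ib_addr.h>
    #include <rdma/ib_pack.h>

    static void iboe_mtu_example(void)
    {
            /* Standard 1500-byte Ethernet MTU: 1500 - 96 = 1404 -> IB_MTU_1024 */
            enum ib_mtu a = iboe_get_mtu(1500);

            /* 4200-byte jumbo MTU: 4200 - 96 = 4104 -> IB_MTU_4096 */
            enum ib_mtu b = iboe_get_mtu(4200);

            (void)a;
            (void)b;
    }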
@@ -305,12 +307,12 @@ static inline void rdma_get_ll_mac(struct in6_addr *addr, u8 *mac)
static inline int rdma_is_multicast_addr(struct in6_addr *addr)
{
- u32 ipv4_addr;
+ __be32 ipv4_addr;
if (addr->s6_addr[0] == 0xff)
return 1;
- memcpy(&ipv4_addr, addr->s6_addr + 12, 4);
+ ipv4_addr = addr->s6_addr32[3];
return (ipv6_addr_v4mapped(addr) && ipv4_is_multicast(ipv4_addr));
}
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 36655899ee02..7ea1382ad0e5 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -37,14 +37,17 @@
#include <uapi/linux/if_ether.h>
enum {
- IB_LRH_BYTES = 8,
- IB_ETH_BYTES = 14,
- IB_VLAN_BYTES = 4,
- IB_GRH_BYTES = 40,
- IB_IP4_BYTES = 20,
- IB_UDP_BYTES = 8,
- IB_BTH_BYTES = 12,
- IB_DETH_BYTES = 8
+ IB_LRH_BYTES = 8,
+ IB_ETH_BYTES = 14,
+ IB_VLAN_BYTES = 4,
+ IB_GRH_BYTES = 40,
+ IB_IP4_BYTES = 20,
+ IB_UDP_BYTES = 8,
+ IB_BTH_BYTES = 12,
+ IB_DETH_BYTES = 8,
+ IB_EXT_ATOMICETH_BYTES = 28,
+ IB_EXT_XRC_BYTES = 4,
+ IB_ICRC_BYTES = 4
};
struct ib_field {
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 355b81f4242d..1f7f604db5aa 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -590,20 +590,20 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
}
-static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
+static inline void sa_path_set_slid(struct sa_path_rec *rec, u32 slid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- rec->ib.slid = htons(ntohl(slid));
+ rec->ib.slid = cpu_to_be16(slid);
else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- rec->opa.slid = slid;
+ rec->opa.slid = cpu_to_be32(slid);
}
-static inline void sa_path_set_dlid(struct sa_path_rec *rec, __be32 dlid)
+static inline void sa_path_set_dlid(struct sa_path_rec *rec, u32 dlid)
{
if (rec->rec_type == SA_PATH_REC_TYPE_IB)
- rec->ib.dlid = htons(ntohl(dlid));
+ rec->ib.dlid = cpu_to_be16(dlid);
else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
- rec->opa.dlid = dlid;
+ rec->opa.dlid = cpu_to_be32(dlid);
}
static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
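A brief sketch of the new calling convention (illustrative, not from the patch): callers now pass LIDs in CPU byte order and the helpers convert to the record's wire format (__be16 for IB, __be32 for OPA). The example_fill_lids() wrapper is hypothetical:

    #include <rdma/ib_sa.h>

    /* Hypothetical helper: store host-order LIDs into a path record. */
    static void example_fill_lids(struct sa_path_rec *rec, u32 slid, u32 dlid)
    {
            sa_path_set_slid(rec, slid);
            sa_path_set_dlid(rec, dlid);
    }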
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 5eb7f5bc8248..6a17f856f841 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -111,10 +111,6 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
u64 bound);
-void rbt_ib_umem_insert(struct umem_odp_node *node,
- struct rb_root_cached *root);
-void rbt_ib_umem_remove(struct umem_odp_node *node,
- struct rb_root_cached *root);
typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
void *cookie);
/*
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e8608b2dc844..fd84cda5ed7c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -229,6 +229,8 @@ enum ib_device_cap_flags {
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ /* The device supports padding incoming writes to cacheline. */
+ IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
};
enum ib_signature_prot_cap {
@@ -309,6 +311,15 @@ struct ib_cq_init_attr {
u32 flags;
};
+enum ib_cq_attr_mask {
+ IB_CQ_MODERATE = 1 << 0,
+};
+
+struct ib_cq_caps {
+ u16 max_cq_moderation_count;
+ u16 max_cq_moderation_period;
+};
+
struct ib_device_attr {
u64 fw_ver;
__be64 sys_image_guid;
@@ -359,6 +370,7 @@ struct ib_device_attr {
u32 max_wq_type_rq;
u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
struct ib_tm_caps tm_caps;
+ struct ib_cq_caps cq_caps;
};
enum ib_mtu {
@@ -1098,6 +1110,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
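As a rough sketch of how a consumer might use the new flag (assumed usage, not code from this series; example_create_padded_qp() is hypothetical): check IB_DEVICE_PCI_WRITE_END_PADDING in the device attributes first, then request the padding behaviour at QP creation time:

    #include <rdma/ib_verbs.h>

    static struct ib_qp *example_create_padded_qp(struct ib_pd *pd,
                                                  struct ib_qp_init_attr *attr)
    {
            /* Only ask for end-of-write padding if the HCA advertises it. */
            if (pd->device->attrs.device_cap_flags &
                IB_DEVICE_PCI_WRITE_END_PADDING)
                    attr->create_flags |= IB_QP_CREATE_PCI_WRITE_END_PADDING;

            return ib_create_qp(pd, attr);
    }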
@@ -1621,6 +1634,7 @@ enum ib_wq_flags {
IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
+ IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
};
struct ib_wq_init_attr {
@@ -2858,6 +2872,21 @@ void ib_dealloc_pd(struct ib_pd *pd);
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
/**
+ * rdma_create_user_ah - Creates an address handle for the given address vector.
+ * It resolves destination mac address for ah attribute of RoCE type.
+ * @pd: The protection domain associated with the address handle.
+ * @ah_attr: The attributes of the address vector.
+ * @udata: pointer to user's input output buffer information need by
+ * provider driver.
+ *
+ * It returns 0 on success and returns appropriate error code on error.
+ * The address handle is used to reference a local or global destination
+ * in all UD QP post sends.
+ */
+struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata);
+/**
* ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
* work completion.
* @hdr: the L3 header to parse
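A minimal sketch of how the new entry point might be called from a path that has udata available (assumed usage; example_user_ah() is hypothetical and assumes rdma_create_user_ah() follows the same ERR_PTR convention as rdma_create_ah()):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int example_user_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
                               struct ib_udata *udata, struct ib_ah **out)
    {
            struct ib_ah *ah = rdma_create_user_ah(pd, ah_attr, udata);

            if (IS_ERR(ah))
                    return PTR_ERR(ah);

            *out = ah;
            return 0;
    }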
@@ -3140,13 +3169,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
int ib_resize_cq(struct ib_cq *cq, int cqe);
/**
- * ib_modify_cq - Modifies moderation params of the CQ
+ * rdma_set_cq_moderation - Modifies moderation params of the CQ
* @cq: The CQ to modify.
* @cq_count: number of CQEs that will trigger an event
* @cq_period: max period of time in usec before triggering an event
*
*/
-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
/**
* ib_destroy_cq - Destroys the specified CQ.
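A hedged sketch of how the renamed helper and the new ib_cq_caps limits might be used together (example_set_moderation() is hypothetical; clamping to the reported maxima is one possible policy, not something this series mandates):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <rdma/ib_verbs.h>

    static int example_set_moderation(struct ib_cq *cq, u16 count, u16 period_us)
    {
            struct ib_cq_caps *caps = &cq->device->attrs.cq_caps;

            /* Treat all-zero caps as "moderation not reported". */
            if (!caps->max_cq_moderation_count && !caps->max_cq_moderation_period)
                    return -EOPNOTSUPP;

            count = min_t(u16, count, caps->max_cq_moderation_count);
            period_us = min_t(u16, period_us, caps->max_cq_moderation_period);

            return rdma_set_cq_moderation(cq, count, period_us);
    }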
@@ -3607,8 +3636,6 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_resolve_eth_dmac(struct ib_device *device,
- struct rdma_ah_attr *ah_attr);
int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index e6e90f18e6d5..f68fca296631 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -97,15 +97,15 @@ static inline u32 opa_get_lid_from_gid(const union ib_gid *gid)
* @dlid: The DLID
* @slid: The SLID
*/
-static inline bool opa_is_extended_lid(u32 dlid, u32 slid)
+static inline bool opa_is_extended_lid(__be32 dlid, __be32 slid)
{
if ((be32_to_cpu(dlid) >=
be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
(be32_to_cpu(slid) >=
be16_to_cpu(IB_MULTICAST_LID_BASE)))
return true;
- else
- return false;
+
+ return false;
}
/* Get multicast lid base */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eed3d8752fa..89ab88c342b6 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -282,7 +282,6 @@ struct rvt_qp {
u32 remote_qpn;
u32 qkey; /* QKEY for this QP (for UD or RD) */
u32 s_size; /* send work queue size */
- u32 s_ahgpsn; /* set to the psn in the copy of the header */
u16 pmtu; /* decoded from path_mtu */
u8 log_pmtu; /* shift for pmtu */
@@ -344,7 +343,6 @@ struct rvt_qp {
struct rvt_swqe *s_wqe;
struct rvt_sge_state s_sge; /* current send request data */
struct rvt_mregion *s_rdma_mr;
- u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
u32 s_last_psn; /* last response PSN processed */
@@ -358,8 +356,10 @@ struct rvt_qp {
u32 s_acked; /* last un-ACK'ed entry */
u32 s_last; /* last completed entry */
u32 s_lsn; /* limit sequence number (credit) */
- u16 s_hdrwords; /* size of s_hdr in 32 bit words */
+ u32 s_ahgpsn; /* set to the psn in the copy of the header */
+ u16 s_cur_size; /* size of send packet in bytes */
u16 s_rdma_ack_cnt;
+ u8 s_hdrwords; /* size of s_hdr in 32 bit words */
s8 s_ahgidx;
u8 s_state; /* opcode of last packet sent */
u8 s_ack_state; /* opcode of packet to ACK */