author     Doug Ledford <dledford@redhat.com>   2019-02-13 09:35:39 -0500
committer  Doug Ledford <dledford@redhat.com>   2019-02-13 09:35:39 -0500
commit     d892273bb5b6fe2a3b5d2d147153e35b447e9041 (patch)
tree       a7911a3a09851d252a24f5dae6b69141939714e3 /include
parent     82771f20338fcdeb0a38319c7a72f1f36df39a7a (diff)
parent     a87145957eb9c474559b3acd2cfc6e8914b0e08f (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma into for-next
I had merged the hfi1-tid code into my local copy of for-next, but was waiting on 0day testing before pushing it (I pushed it to my wip branch). Having waited several days for 0day testing to show up, I'm finally just going to push it out. In the meantime, though, Jason pushed other stuff to for-next, so I needed to merge up the branches before pushing.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/scatterlist.h       | 49
-rw-r--r--  include/rdma/ib_mad.h             |  5
-rw-r--r--  include/rdma/ib_verbs.h           | 55
-rw-r--r--  include/rdma/iw_cm.h              |  3
-rw-r--r--  include/rdma/rdma_cm.h            |  1
-rw-r--r--  include/uapi/rdma/rdma_user_cm.h  |  4
6 files changed, 86 insertions, 31 deletions
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b96f0d0b5b8f..b4be960c7e5d 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -339,12 +339,12 @@ int sg_alloc_table_chained(struct sg_table *table, int nents,
/*
* sg page iterator
*
- * Iterates over sg entries page-by-page. On each successful iteration,
- * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
- * to get the current page and its dma address. @piter->sg will point to the
- * sg holding this page and @piter->sg_pgoffset to the page's page offset
- * within the sg. The iteration will stop either when a maximum number of sg
- * entries was reached or a terminating sg (sg_last(sg) == true) was reached.
+ * Iterates over sg entries page-by-page. On each successful iteration, you
+ * can call sg_page_iter_page(@piter) to get the current page.
+ * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset
+ * to the page's page offset within the sg. The iteration will stop either
+ * when a maximum number of sg entries was reached or a terminating sg
+ * (sg_last(sg) == true) was reached.
*/
struct sg_page_iter {
struct scatterlist *sg; /* sg holding the page */
@@ -356,7 +356,19 @@ struct sg_page_iter {
* next step */
};
+/*
+ * sg page iterator for DMA addresses
+ *
+ * This is the same as sg_page_iter however you can call
+ * sg_page_iter_dma_address(@dma_iter) to get the page's DMA
+ * address. sg_page_iter_page() cannot be called on this iterator.
+ */
+struct sg_dma_page_iter {
+ struct sg_page_iter base;
+};
+
bool __sg_page_iter_next(struct sg_page_iter *piter);
+bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter);
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset);
@@ -372,11 +384,13 @@ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
/**
* sg_page_iter_dma_address - get the dma address of the current page held by
* the page iterator.
- * @piter: page iterator holding the page
+ * @dma_iter: page iterator holding the page
*/
-static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+static inline dma_addr_t
+sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
{
- return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+ return sg_dma_address(dma_iter->base.sg) +
+ (dma_iter->base.sg_pgoffset << PAGE_SHIFT);
}
/**
@@ -385,11 +399,28 @@ static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
* @piter: page iterator to hold current page, sg, sg_pgoffset
* @nents: maximum number of sg entries to iterate over
* @pgoffset: starting page offset
+ *
+ * Callers may use sg_page_iter_page() to get each page pointer.
*/
#define for_each_sg_page(sglist, piter, nents, pgoffset) \
for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \
__sg_page_iter_next(piter);)
+/**
+ * for_each_sg_dma_page - iterate over the pages of the given sg list
+ * @sglist: sglist to iterate over
+ * @dma_iter: page iterator to hold current page
+ * @dma_nents: maximum number of sg entries to iterate over, this is the value
+ * returned from dma_map_sg
+ * @pgoffset: starting page offset
+ *
+ * Callers may use sg_page_iter_dma_address() to get each page's DMA address.
+ */
+#define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \
+ for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \
+ pgoffset); \
+ __sg_page_iter_dma_next(dma_iter);)
+
/*
* Mapping sg iterator
*
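To make the new DMA iterator concrete, here is a minimal sketch of how a caller might walk a mapped scatterlist with for_each_sg_dma_page(); the helper name and the way the address is consumed are hypothetical, and error handling is trimmed to keep it short.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: walk each mapped page's bus address. */
static void demo_program_pages(struct device *dev, struct scatterlist *sgl,
			       int nents)
{
	struct sg_dma_page_iter dma_iter;
	int dma_nents;

	dma_nents = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!dma_nents)
		return;

	/* The dma_map_sg() return value, not nents, bounds the walk. */
	for_each_sg_dma_page(sgl, &dma_iter, dma_nents, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);

		/* hand 'addr' to the device, one PAGE_SIZE chunk at a time */
		pr_debug("dma page at %pad\n", &addr);
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
}

Note that sg_page_iter_page() must not be called on a struct sg_dma_page_iter; the plain for_each_sg_page()/sg_page_iter_page() pair remains the way to get struct page pointers.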
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index fdef558e3a2d..79ba8219e7dc 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -616,12 +616,11 @@ struct ib_mad_agent {
void *context;
u32 hi_tid;
u32 flags;
+ void *security;
+ struct list_head mad_agent_sec_list;
u8 port_num;
u8 rmpp_version;
- void *security;
bool smp_allowed;
- bool lsm_nb_reg;
- struct notifier_block lsm_nb;
};
/**
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 2e1f1e885ee5..135fab2c016c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2264,6 +2264,19 @@ struct ib_counters_read_attr {
struct uverbs_attr_bundle;
+#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
+ .size_##ib_struct = \
+ (sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO( \
+ !__same_type(((struct drv_struct *)NULL)->member, \
+ struct ib_struct)))
+
+#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
+ ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, GFP_KERNEL))
+
+#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
+
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
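A rough sketch of how a provider might use these new macros; the names demo_pd and demo_dev_ops are hypothetical. INIT_RDMA_OBJ_SIZE() records the driver struct size in ops.size_ib_pd and build-checks that the named member is a struct ib_pd at offset zero, so the core can allocate the whole object with rdma_zalloc_drv_obj().

#include <rdma/ib_verbs.h>

/* Hypothetical driver PD with struct ib_pd embedded as the first member. */
struct demo_pd {
	struct ib_pd ibpd;	/* must be the member named in INIT_RDMA_OBJ_SIZE */
	u32 pdn;		/* driver-private state */
};

static const struct ib_device_ops demo_dev_ops = {
	/* ... other callbacks ... */
	INIT_RDMA_OBJ_SIZE(ib_pd, demo_pd, ibpd),
};

/* The core side can then do, in effect:
 *	struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
 * which kzalloc()s ops.size_ib_pd bytes, i.e. sizeof(struct demo_pd). */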
@@ -2372,10 +2385,9 @@ struct ib_device_ops {
int (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
- struct ib_pd *(*alloc_pd)(struct ib_device *device,
- struct ib_ucontext *context,
- struct ib_udata *udata);
- int (*dealloc_pd)(struct ib_pd *pd);
+ int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
+ struct ib_udata *udata);
+ void (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah *(*create_ah)(struct ib_pd *pd,
struct rdma_ah_attr *ah_attr, u32 flags,
struct ib_udata *udata);
@@ -2517,6 +2529,8 @@ struct ib_device_ops {
*/
int (*fill_res_entry)(struct sk_buff *msg,
struct rdma_restrack_entry *entry);
+
+ DECLARE_RDMA_OBJ_SIZE(ib_pd);
};
struct ib_device {
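A sketch of what driver callbacks could look like under the new embedded-allocation signatures, reusing the hypothetical struct demo_pd from the sketch above: the core allocates the PD before calling alloc_pd(), so the driver only fills in its private state and returns 0 or a negative errno, and dealloc_pd() returns void.

static int demo_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			 struct ib_udata *udata)
{
	struct demo_pd *pd = container_of(ibpd, struct demo_pd, ibpd);

	pd->pdn = 42;	/* stand-in for a real PD number allocation */
	return 0;
}

static void demo_dealloc_pd(struct ib_pd *ibpd)
{
	/* Release driver resources only; the core frees the memory. */
}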
@@ -2528,12 +2542,8 @@ struct ib_device {
struct list_head event_handler_list;
spinlock_t event_handler_lock;
- rwlock_t client_data_lock;
- struct list_head core_list;
- /* Access to the client_data_list is protected by the client_data_lock
- * rwlock and the lists_rwsem read-write semaphore
- */
- struct list_head client_data_list;
+ struct rw_semaphore client_data_rwsem;
+ struct xarray client_data;
struct ib_cache cache;
/**
@@ -2558,12 +2568,6 @@ struct ib_device {
struct kobject *ports_kobj;
struct list_head port_list;
- enum {
- IB_DEV_UNINITIALIZED,
- IB_DEV_REGISTERED,
- IB_DEV_UNREGISTERED
- } reg_state;
-
int uverbs_abi_ver;
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -2602,7 +2606,7 @@ struct ib_device {
};
struct ib_client {
- char *name;
+ const char *name;
void (*add) (struct ib_device *);
void (*remove)(struct ib_device *, void *client_data);
@@ -2629,6 +2633,7 @@ struct ib_client {
const struct sockaddr *addr,
void *client_data);
struct list_head list;
+ u32 client_id;
/* kverbs are not required by the client */
u8 no_kverbs_req:1;
@@ -2651,7 +2656,21 @@ void ib_unregister_device(struct ib_device *device);
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
-void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
+/**
+ * ib_get_client_data - Get IB client context
+ * @device:Device to get context for
+ * @client:Client to get context for
+ *
+ * ib_get_client_data() returns the client context data set with
+ * ib_set_client_data(). This can only be called while the client is
+ * registered to the device, once the ib_client remove() callback returns this
+ * cannot be called.
+ */
+static inline void *ib_get_client_data(struct ib_device *device,
+ struct ib_client *client)
+{
+ return xa_load(&device->client_data, client->client_id);
+}
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data);
void ib_set_device_ops(struct ib_device *device,
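For illustration, a minimal hypothetical client pairing ib_set_client_data() in its add() callback with ib_get_client_data() elsewhere; per the kdoc above, the lookup is only valid until the remove() callback returns.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

static struct ib_client demo_client;

struct demo_client_data {
	struct ib_device *device;
};

static void demo_add_one(struct ib_device *device)
{
	struct demo_client_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return;
	data->device = device;
	ib_set_client_data(device, &demo_client, data);
}

static void demo_use(struct ib_device *device)
{
	struct demo_client_data *data = ib_get_client_data(device, &demo_client);

	if (data)
		pr_info("demo client data is present for this device\n");
}

static void demo_remove_one(struct ib_device *device, void *client_data)
{
	kfree(client_data);	/* no ib_get_client_data() after this returns */
}

static struct ib_client demo_client = {
	.name   = "demo",
	.add    = demo_add_one,
	.remove = demo_remove_one,
};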
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 48512abd3162..0e1f02815643 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -94,7 +94,8 @@ struct iw_cm_id {
void (*add_ref)(struct iw_cm_id *);
void (*rem_ref)(struct iw_cm_id *);
u8 tos;
- bool mapped;
+ bool tos_set:1;
+ bool mapped:1;
};
struct iw_cm_conn_param {
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 60987a5903b7..71f48cfdc24c 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -374,6 +374,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
*/
int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
+int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout);
/**
* rdma_get_service_id - Return the IB service ID for a specified address.
* @id: Communication identifier associated with the address.
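A hedged example of the new kernel-side setter: a ULP that owns an rdma_cm_id could override the ACK timeout before connecting. The function name below is hypothetical and the value 14 is only illustrative (IB encodes the timeout as 4.096 us * 2^timeout, so 14 is roughly 67 ms).

#include <rdma/rdma_cm.h>

static int demo_tune_cm_id(struct rdma_cm_id *id)
{
	int ret;

	ret = rdma_set_ack_timeout(id, 14);	/* ~67 ms */
	if (ret)
		pr_warn("rdma_set_ack_timeout failed: %d\n", ret);
	return ret;
}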
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 0d1e78ebad05..e42940a215a3 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -300,6 +300,10 @@ enum {
RDMA_OPTION_ID_TOS = 0,
RDMA_OPTION_ID_REUSEADDR = 1,
RDMA_OPTION_ID_AFONLY = 2,
+ RDMA_OPTION_ID_ACK_TIMEOUT = 3
+};
+
+enum {
RDMA_OPTION_IB_PATH = 1
};