author		Jason Gunthorpe <jgg@mellanox.com>	2019-04-24 16:20:34 -0300
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-04-24 16:20:34 -0300
commit		449a224c10a48d047c799c5c5d3b22d6aec98c60 (patch)
tree		7ecff2cce22ad3875b70a772eae55a443752cfce /include/rdma
parent		3c176c9d72446217f6451543452692141eb665dc (diff)
parent		4eb6ab13b99148b5bf9bfdae7977fe139b4452f8 (diff)
Merge branch 'rdma_mmap' into rdma.git for-next
Jason Gunthorpe says:

====================

Upon review it turns out there are some long standing problems in the BAR
mapping area:

* BAR pages intended for read-only can be switched to writable via
  mprotect.
* Missing use of rdma_user_mmap_io for the mlx5 clock BAR page.
* Disassociate causes SIGBUS when touching the pages.
* CPU pages are being mapped through to the process via remap_pfn_range
  instead of the more appropriate vm_insert_page, causing weird behaviors
  during disassociation.

This series adds the missing VM_* flag manipulation, adds faulting a zero
page for disassociation and revises the CPU page mappings to use
vm_insert_page.

====================

For dependencies this branch is based on for-rc from
git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git

* branch 'rdma_mmap':
  RDMA: Remove rdma_user_mmap_page
  RDMA/mlx5: Use get_zeroed_page() for clock_info
  RDMA/ucontext: Fix regression with disassociate
  RDMA/mlx5: Use rdma_user_map_io for mapping BAR pages
  RDMA/mlx5: Do not allow the user to write to the clock page

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
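The mprotect problem in the first bullet comes down to the VM_MAY* mask:
mprotect() may only add a protection bit when the matching VM_MAY* bit is
still set in vma->vm_flags. The following is a minimal sketch of that flag
manipulation, not code from this series; map_bar_page_readonly and its pfn
argument are hypothetical names used only for illustration.

	#include <linux/mm.h>

	/*
	 * Map a single read-only BAR page into userspace. Clearing
	 * VM_MAYWRITE is what pins the mapping read-only for its whole
	 * lifetime, since mprotect(PROT_WRITE) is refused once the
	 * VM_MAYWRITE bit is gone.
	 */
	static int map_bar_page_readonly(struct vm_area_struct *vma,
					 unsigned long pfn)
	{
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		/* Refuse a mapping that is writable at mmap() time ... */
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;

		/* ... and keep mprotect() from upgrading it later. */
		vma->vm_flags &= ~VM_MAYWRITE;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
					  vma->vm_page_prot);
	}

Per the cover letter, the disassociate half of the series pairs this kind of
mapping with fault handling that supplies a zeroed page once the hardware
pages have been zapped, so touching the range after disassociation no longer
raises SIGBUS.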
Diffstat (limited to 'include/rdma')
-rw-r--r--	include/rdma/ib_verbs.h	| 9 ---------
1 file changed, 0 insertions(+), 9 deletions(-)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 43a75ab8ea8a..737ef5ed3930 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2724,9 +2724,6 @@ void ib_set_device_ops(struct ib_device *device,
 #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
 		      unsigned long pfn, unsigned long size, pgprot_t prot);
-int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-			struct vm_area_struct *vma, struct page *page,
-			unsigned long size);
 #else
 static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
 				    struct vm_area_struct *vma,
@@ -2735,12 +2732,6 @@ static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
 {
 	return -EINVAL;
 }
-static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
-				      struct vm_area_struct *vma, struct page *page,
-				      unsigned long size)
-{
-	return -EINVAL;
-}
 #endif
 
 static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
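
With rdma_user_mmap_page gone, the cover letter says CPU pages are now mapped
with vm_insert_page instead of remap_pfn_range. Below is a hypothetical
sketch of that pattern, assuming clock_info is a buffer obtained from
get_zeroed_page(); map_clock_info_page is an illustrative name, not the
driver's actual function.

	#include <linux/mm.h>

	/*
	 * Map a kernel-allocated CPU page read-only into userspace.
	 * vm_insert_page() takes a reference on the page and leaves the
	 * VMA a normal struct-page mapping, so unmap and disassociate
	 * behave sanely, unlike a remap_pfn_range() PFN mapping.
	 */
	static int map_clock_info_page(struct vm_area_struct *vma,
				       void *clock_info)
	{
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		/* Userspace only reads this page; never let it be writable. */
		if (vma->vm_flags & VM_WRITE)
			return -EPERM;
		vma->vm_flags &= ~VM_MAYWRITE;

		return vm_insert_page(vma, vma->vm_start,
				      virt_to_page(clock_info));
	}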