Diffstat (limited to 'include')
-rw-r--r--  include/linux/device.h        |   1
-rw-r--r--  include/linux/dma-mapping.h   |  55
-rw-r--r--  include/linux/mic_bus.h       |   2
-rw-r--r--  include/rdma/ib_verbs.h       | 143
-rw-r--r--  include/xen/arm/hypervisor.h  |   2
5 files changed, 52 insertions(+), 151 deletions(-)
diff --git a/include/linux/device.h b/include/linux/device.h
index a48a7ff70164..30c4570e928d 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -925,6 +925,7 @@ struct device {
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
+ const struct dma_map_ops *dma_ops;
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
alloc_coherent mappings as
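Note: with dma_ops now stored in struct device itself, a bus or software-only device can install its DMA operations through the set_dma_ops() helper added below in dma-mapping.h instead of poking architecture-specific dev_archdata fields. A minimal sketch (hypothetical driver code, not part of this patch; dma_virt_ops is only declared here and is defined elsewhere in this series):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static void example_setup_dma(struct device *dev)
	{
		/*
		 * Illustrative only: wire a software-only device to the
		 * virtual DMA ops so dma_map_*() returns kernel virtual
		 * addresses instead of bus addresses.
		 */
		set_dma_ops(dev, &dma_virt_ops);
	}
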
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index c24721a33b4c..0977317c6835 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -134,7 +134,8 @@ struct dma_map_ops {
int is_phys;
};
-extern struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_noop_ops;
+extern const struct dma_map_ops dma_virt_ops;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
@@ -171,14 +172,26 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->dma_ops)
+ return dev->dma_ops;
+ return get_arch_dma_ops(dev ? dev->bus : NULL);
+}
+
+static inline void set_dma_ops(struct device *dev,
+ const struct dma_map_ops *dma_ops)
+{
+ dev->dma_ops = dma_ops;
+}
#else
/*
* Define the dma api to allow compilation but not linking of
* dma dependent code. Code that depends on the dma-mapping
* API needs to set 'depends on HAS_DMA' in its Kconfig
*/
-extern struct dma_map_ops bad_dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+extern const struct dma_map_ops bad_dma_ops;
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
return &bad_dma_ops;
}
@@ -189,7 +202,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(ptr, size);
@@ -208,7 +221,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
@@ -224,7 +237,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
struct scatterlist *s;
@@ -242,7 +255,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -256,7 +269,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -272,7 +285,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
@@ -286,7 +299,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
@@ -307,7 +320,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
size_t size, enum dma_data_direction dir,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_resource)
@@ -319,7 +332,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_cpu)
@@ -331,7 +344,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_single_for_device)
@@ -371,7 +384,7 @@ static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_cpu)
@@ -383,7 +396,7 @@ static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(dir));
if (ops->sync_sg_for_device)
@@ -428,7 +441,7 @@ static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->mmap)
return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -446,7 +459,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
if (ops->get_sgtable)
return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -464,7 +477,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
@@ -486,7 +499,7 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
unsigned long attrs)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
WARN_ON(irqs_disabled());
@@ -544,7 +557,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (!ops)
return 0;
@@ -557,7 +570,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = get_dma_ops(dev);
+ const struct dma_map_ops *ops = get_dma_ops(dev);
if (ops->set_dma_mask)
return ops->set_dma_mask(dev, mask);
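Note: because every accessor above now takes "const struct dma_map_ops *", an architecture or bus can declare its ops table const so it lands in read-only memory. A minimal sketch of such a table (hypothetical names; the map_page callback is modeled on the existing dma-noop implementation and is not taken from this patch):

	#include <linux/mm.h>
	#include <linux/dma-mapping.h>

	static dma_addr_t example_map_page(struct device *dev, struct page *page,
					   unsigned long offset, size_t size,
					   enum dma_data_direction dir,
					   unsigned long attrs)
	{
		/* 1:1 physical mapping, as dma-noop does */
		return page_to_phys(page) + offset;
	}

	static const struct dma_map_ops example_dma_ops = {
		.map_page = example_map_page,
		/* remaining callbacks omitted for brevity */
	};
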
diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h
index 27d7c95fd0da..504d54c71bdb 100644
--- a/include/linux/mic_bus.h
+++ b/include/linux/mic_bus.h
@@ -90,7 +90,7 @@ struct mbus_hw_ops {
};
struct mbus_device *
-mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops,
+mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops,
struct mbus_hw_ops *hw_ops, int index,
void __iomem *mmio_va);
void mbus_unregister_device(struct mbus_device *mbdev);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 89f5bd4e1d52..d84849c5dc05 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1843,53 +1843,6 @@ struct ib_cache {
struct ib_port_cache *ports;
};
-struct ib_dma_mapping_ops {
- int (*mapping_error)(struct ib_device *dev,
- u64 dma_addr);
- u64 (*map_single)(struct ib_device *dev,
- void *ptr, size_t size,
- enum dma_data_direction direction);
- void (*unmap_single)(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction);
- u64 (*map_page)(struct ib_device *dev,
- struct page *page, unsigned long offset,
- size_t size,
- enum dma_data_direction direction);
- void (*unmap_page)(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction);
- int (*map_sg)(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- void (*unmap_sg)(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction);
- int (*map_sg_attrs)(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long attrs);
- void (*unmap_sg_attrs)(struct ib_device *dev,
- struct scatterlist *sg, int nents,
- enum dma_data_direction direction,
- unsigned long attrs);
- void (*sync_single_for_cpu)(struct ib_device *dev,
- u64 dma_handle,
- size_t size,
- enum dma_data_direction dir);
- void (*sync_single_for_device)(struct ib_device *dev,
- u64 dma_handle,
- size_t size,
- enum dma_data_direction dir);
- void *(*alloc_coherent)(struct ib_device *dev,
- size_t size,
- u64 *dma_handle,
- gfp_t flag);
- void (*free_coherent)(struct ib_device *dev,
- size_t size, void *cpu_addr,
- u64 dma_handle);
-};
-
struct iw_cm_verbs;
struct ib_port_immutable {
@@ -1900,8 +1853,6 @@ struct ib_port_immutable {
};
struct ib_device {
- struct device *dma_device;
-
char name[IB_DEVICE_NAME_MAX];
struct list_head event_handler_list;
@@ -2151,7 +2102,6 @@ struct ib_device {
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
- struct ib_dma_mapping_ops *dma_ops;
struct module *owner;
struct device dev;
@@ -3043,9 +2993,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
*/
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
- if (dev->dma_ops)
- return dev->dma_ops->mapping_error(dev, dma_addr);
- return dma_mapping_error(dev->dma_device, dma_addr);
+ return dma_mapping_error(&dev->dev, dma_addr);
}
/**
@@ -3059,9 +3007,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
- return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+ return dma_map_single(&dev->dev, cpu_addr, size, direction);
}
/**
@@ -3075,28 +3021,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- dev->dma_ops->unmap_single(dev, addr, size, direction);
- else
- dma_unmap_single(dev->dma_device, addr, size, direction);
-}
-
-static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
- void *cpu_addr, size_t size,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
- direction, dma_attrs);
-}
-
-static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
- u64 addr, size_t size,
- enum dma_data_direction direction,
- unsigned long dma_attrs)
-{
- return dma_unmap_single_attrs(dev->dma_device, addr, size,
- direction, dma_attrs);
+ dma_unmap_single(&dev->dev, addr, size, direction);
}
/**
@@ -3113,9 +3038,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev,
size_t size,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- return dev->dma_ops->map_page(dev, page, offset, size, direction);
- return dma_map_page(dev->dma_device, page, offset, size, direction);
+ return dma_map_page(&dev->dev, page, offset, size, direction);
}
/**
@@ -3129,10 +3052,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- dev->dma_ops->unmap_page(dev, addr, size, direction);
- else
- dma_unmap_page(dev->dma_device, addr, size, direction);
+ dma_unmap_page(&dev->dev, addr, size, direction);
}
/**
@@ -3146,9 +3066,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- return dev->dma_ops->map_sg(dev, sg, nents, direction);
- return dma_map_sg(dev->dma_device, sg, nents, direction);
+ return dma_map_sg(&dev->dev, sg, nents, direction);
}
/**
@@ -3162,10 +3080,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
- if (dev->dma_ops)
- dev->dma_ops->unmap_sg(dev, sg, nents, direction);
- else
- dma_unmap_sg(dev->dma_device, sg, nents, direction);
+ dma_unmap_sg(&dev->dev, sg, nents, direction);
}
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
@@ -3173,12 +3088,7 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- if (dev->dma_ops)
- return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction,
- dma_attrs);
- else
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
+ return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
@@ -3186,12 +3096,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
enum dma_data_direction direction,
unsigned long dma_attrs)
{
- if (dev->dma_ops)
- return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction,
- dma_attrs);
- else
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
- dma_attrs);
+ dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3233,10 +3138,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- if (dev->dma_ops)
- dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
- else
- dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+ dma_sync_single_for_cpu(&dev->dev, addr, size, dir);
}
/**
@@ -3251,10 +3153,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
size_t size,
enum dma_data_direction dir)
{
- if (dev->dma_ops)
- dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
- else
- dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+ dma_sync_single_for_device(&dev->dev, addr, size, dir);
}
/**
@@ -3266,19 +3165,10 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
*/
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
size_t size,
- u64 *dma_handle,
+ dma_addr_t *dma_handle,
gfp_t flag)
{
- if (dev->dma_ops)
- return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
- else {
- dma_addr_t handle;
- void *ret;
-
- ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
- *dma_handle = handle;
- return ret;
- }
+ return dma_alloc_coherent(&dev->dev, size, dma_handle, flag);
}
/**
@@ -3290,12 +3180,9 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
*/
static inline void ib_dma_free_coherent(struct ib_device *dev,
size_t size, void *cpu_addr,
- u64 dma_handle)
+ dma_addr_t dma_handle)
{
- if (dev->dma_ops)
- dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
- else
- dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+ dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle);
}
/**
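Note: with ib_dma_mapping_ops removed, the ib_dma_*() wrappers forward straight to the generic DMA API on &dev->dev, so ULP callers are unchanged. An illustrative caller (hypothetical helper, not from this patch):

	#include <rdma/ib_verbs.h>

	static int example_map_buffer(struct ib_device *ibdev, void *buf,
				      size_t len, u64 *dma_addr)
	{
		/* Same wrapper calls as before; they now resolve through the
		 * device's generic dma_map_ops rather than dev->dma_ops. */
		*dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, *dma_addr))
			return -ENOMEM;
		return 0;
	}
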
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 95251512e2c4..44b587b49904 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -18,7 +18,7 @@ static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return PARAVIRT_LAZY_NONE;
}
-extern struct dma_map_ops *xen_dma_ops;
+extern const struct dma_map_ops *xen_dma_ops;
#ifdef CONFIG_XEN
void __init xen_early_init(void);