author		Xiang Chen <chenxiang66@hisilicon.com>	2021-03-18 17:29:30 +0800
committer	Christoph Hellwig <hch@lst.de>	2021-04-02 16:41:08 +0200
commit		ca947482b0b30443e6da1f0f5ba7244e34a4f65a (patch)
tree		4aa88cbca829f7daa3ceaf7384a4b81ab69729ca /kernel/dma
parent		42e4eefb089f12ea900062ecdcc7ca10c3423a05 (diff)
dma-mapping: benchmark: Add support for multi-pages map/unmap
Currently the dma-map benchmark only supports mapping/unmapping a single page at a time, but there are other scenarios that need multi-page map/unmap support: for multi-page interfaces such as dma_alloc_coherent() and dma_map_sg(), the time spent on a multi-page map/unmap is not the single-page time * npages (it is not linear), as the IOMMU may use block descriptors instead of page descriptors when the size allows (e.g. 2M/1G), and it can also send a single TLB invalidation command to invalidate multiple pages instead of one command per page when RIL is enabled (which shortens the unmap time). So it is necessary to add support for multi-page map/unmap.

Add a parameter "-g" to support multi-page map/unmap.

Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Acked-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
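---

As a rough illustration of the new knob, here is a minimal userspace sketch (not the in-tree selftest) that drives granule directly through the benchmark ioctl. It assumes the /dev/dma_map_benchmark misc device and the DMA_MAP_BENCHMARK ioctl registered by this driver, with a device already bound to the dma_map_benchmark driver; only granule and its neighbouring fields appear in the hunks below, so treat the rest of the struct layout as mirrored from the selftest header rather than confirmed by this diff. The in-tree selftest (tools/testing/selftests/dma) wraps the same ioctl behind its -g flag.

/*
 * Hedged sketch: exercise the new "granule" parameter from userspace.
 * Device node, ioctl number, and the fields other than granule are
 * assumptions taken from the selftest side of this interface.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct map_benchmark {
	__u64 avg_map_100ns;   /* average map latency, in 100ns units */
	__u64 map_stddev;      /* standard deviation of map latency */
	__u64 avg_unmap_100ns; /* as above, for unmap */
	__u64 unmap_stddev;
	__u32 threads;         /* how many threads map/unmap in parallel */
	__u32 seconds;         /* how long the test lasts */
	__s32 node;            /* NUMA node to run on, -1 for any */
	__u32 dma_bits;        /* DMA addressing capability */
	__u32 dma_dir;         /* DMA data direction */
	__u32 dma_trans_ns;    /* simulated DMA transfer time in ns */
	__u32 granule;         /* pages per map/unmap, 1..1024 */
	__u8 expansion[76];    /* For future use */
};

#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)

int main(void)
{
	struct map_benchmark bp = {
		.threads = 1,
		.seconds = 10,
		.node = -1,
		.dma_bits = 32,
		.dma_dir = 0,  /* DMA_MAP_BIDIRECTIONAL in the uapi enum */
		.granule = 8,  /* the new knob: map/unmap 8 pages at once */
	};
	int fd = open("/dev/dma_map_benchmark", O_RDWR);

	if (fd < 0 || ioctl(fd, DMA_MAP_BENCHMARK, &bp)) {
		perror("dma_map_benchmark");
		return 1;
	}
	printf("map avg %llu x100ns, unmap avg %llu x100ns\n",
	       (unsigned long long)bp.avg_map_100ns,
	       (unsigned long long)bp.avg_unmap_100ns);
	return 0;
}

With granule > 1, the same dma_map_single() path below covers a physically contiguous multi-page buffer, so the reported latency can reflect block-descriptor mappings and batched TLB invalidation where the IOMMU supports them.

---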
Diffstat (limited to 'kernel/dma')
-rw-r--r--	kernel/dma/map_benchmark.c	21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
index 00d6549a5495..9b9af1bd6be3 100644
--- a/kernel/dma/map_benchmark.c
+++ b/kernel/dma/map_benchmark.c
@@ -38,7 +38,8 @@ struct map_benchmark {
__u32 dma_bits; /* DMA addressing capability */
__u32 dma_dir; /* DMA data direction */
__u32 dma_trans_ns; /* time for DMA transmission in ns */
- __u8 expansion[80]; /* For future use */
+ __u32 granule; /* how many PAGE_SIZE units to map/unmap at a time */
+ __u8 expansion[76]; /* For future use */
};
struct map_benchmark_data {
@@ -58,9 +59,11 @@ static int map_benchmark_thread(void *data)
void *buf;
dma_addr_t dma_addr;
struct map_benchmark_data *map = data;
+ int npages = map->bparam.granule;
+ u64 size = npages * PAGE_SIZE;
int ret = 0;
- buf = (void *)__get_free_page(GFP_KERNEL);
+ buf = alloc_pages_exact(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -76,10 +79,10 @@ static int map_benchmark_thread(void *data)
* 66 means everything goes well! 66 is lucky.
*/
if (map->dir != DMA_FROM_DEVICE)
- memset(buf, 0x66, PAGE_SIZE);
+ memset(buf, 0x66, size);
map_stime = ktime_get();
- dma_addr = dma_map_single(map->dev, buf, PAGE_SIZE, map->dir);
+ dma_addr = dma_map_single(map->dev, buf, size, map->dir);
if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
pr_err("dma_map_single failed on %s\n",
dev_name(map->dev));
@@ -93,7 +96,7 @@ static int map_benchmark_thread(void *data)
ndelay(map->bparam.dma_trans_ns);
unmap_stime = ktime_get();
- dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir);
+ dma_unmap_single(map->dev, dma_addr, size, map->dir);
unmap_etime = ktime_get();
unmap_delta = ktime_sub(unmap_etime, unmap_stime);
@@ -112,7 +115,7 @@ static int map_benchmark_thread(void *data)
}
out:
- free_page((unsigned long)buf);
+ free_pages_exact(buf, size);
return ret;
}
@@ -203,7 +206,6 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
struct map_benchmark_data *map = file->private_data;
void __user *argp = (void __user *)arg;
u64 old_dma_mask;
-
int ret;
if (copy_from_user(&map->bparam, argp, sizeof(map->bparam)))
@@ -234,6 +236,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;
}
+ if (map->bparam.granule < 1 || map->bparam.granule > 1024) {
+ pr_err("invalid granule size\n");
+ return -EINVAL;
+ }
+
switch (map->bparam.dma_dir) {
case DMA_MAP_BIDIRECTIONAL:
map->dir = DMA_BIDIRECTIONAL;