author     Christoph Hellwig <hch@lst.de>        2019-11-13 14:45:28 +0100
committer  Jason Gunthorpe <jgg@mellanox.com>    2019-11-23 19:56:45 -0400
commit     93f4e735b6d98ee4b7a1252d81e815a983e359f2 (patch)
tree       2c51e00a4f59eed5405fe54b85e3c550f456d3c6 /mm/hmm.c
parent     d28c2c9a487708b9db519ce7fedc10333032c76b (diff)
mm/hmm: remove hmm_range_dma_map and hmm_range_dma_unmap
These two functions have never been used since they were added.

Link: https://lore.kernel.org/r/20191113134528.21187-1-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
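For reference, the removed helpers only glued hmm_range_fault() together with per-page dma_map_page() calls, which a driver can open-code itself. A minimal sketch of that pattern (hypothetical driver code, not part of this commit; the my_map_range() name, the fixed DMA direction, and the leave-unwinding-to-the-caller error policy are illustrative assumptions):

    #include <linux/dma-mapping.h>
    #include <linux/hmm.h>

    /*
     * Hypothetical driver-side replacement: fault the range, then map each
     * backing page for DMA. A zero dma_addr_t marks a slot with no backing
     * page; unwinding earlier mappings on failure is left to the caller.
     */
    static long my_map_range(struct hmm_range *range, struct device *dev,
                             dma_addr_t *daddrs)
    {
            unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
            long ret;

            ret = hmm_range_fault(range, 0);
            if (ret <= 0)
                    return ret ? ret : -EBUSY;

            for (i = 0; i < npages; i++) {
                    struct page *page;

                    daddrs[i] = 0;
                    page = hmm_device_entry_to_page(range, range->pfns[i]);
                    if (!page)
                            continue;

                    daddrs[i] = dma_map_page(dev, page, 0, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL);
                    if (dma_mapping_error(dev, daddrs[i]))
                            return -EFAULT;
            }
            return 0;
    }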
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--  mm/hmm.c  | 147 ----
1 file changed, 0 insertions(+), 147 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index d903b1555de4..d379cb6496ae 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -691,150 +691,3 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
-
-/**
- * hmm_range_dma_map - hmm_range_fault() and dma_map_page() all in one.
- * @range: range being faulted
- * @device: device to map page to
- * @daddrs: array of dma addresses for the mapped pages
- * @flags: HMM_FAULT_*
- *
- * Return: the number of pages mapped on success (including zero), or any
- * error status returned by hmm_range_fault() otherwise.
- */
-long hmm_range_dma_map(struct hmm_range *range, struct device *device,
- dma_addr_t *daddrs, unsigned int flags)
-{
- unsigned long i, npages, mapped;
- long ret;
-
- ret = hmm_range_fault(range, flags);
- if (ret <= 0)
- return ret ? ret : -EBUSY;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0, mapped = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- /*
- * FIXME need to update DMA API to provide invalid DMA address
- * value instead of a function to test dma address value. This
- * would remove a lot of dumb code duplicated across many arches.
- *
- * For now setting it to 0 here is good enough as the pfns[]
- * value is what is used to check what is valid and what isn't.
- */
- daddrs[i] = 0;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- /* Check if range is being invalidated */
- if (mmu_interval_check_retry(range->notifier,
- range->notifier_seq)) {
- ret = -EBUSY;
- goto unmap;
- }
-
- /* If it is read and write then map bi-directionally. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
- dir = DMA_BIDIRECTIONAL;
-
- daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(device, daddrs[i])) {
- ret = -EFAULT;
- goto unmap;
- }
-
- mapped++;
- }
-
- return mapped;
-
-unmap:
- for (npages = i, i = 0; (i < npages) && mapped; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (dma_mapping_error(device, daddrs[i]))
- continue;
-
- /* If it is read and write then map bi-directionally. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
- dir = DMA_BIDIRECTIONAL;
-
- dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
- mapped--;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(hmm_range_dma_map);
-
-/**
- * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
- * @range: range being unmapped
- * @device: device against which dma map was done
- * @daddrs: dma address of mapped pages
- * @dirty: dirty the page if it had the write flag set
- * Return: number of pages unmapped on success, -EINVAL otherwise
- *
- * Note that the caller MUST abide by the mmu notifier, or use the HMM mirror
- * and abide by the sync_cpu_device_pagetables() callback, so that it is safe
- * here to call set_page_dirty(). The caller must also take appropriate locks
- * to prevent concurrent mmu notifiers or sync_cpu_device_pagetables() from making progress.
- */
-long hmm_range_dma_unmap(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- bool dirty)
-{
- unsigned long i, npages;
- long cpages = 0;
-
- /* Sanity check. */
- if (range->end <= range->start)
- return -EINVAL;
- if (!daddrs)
- return -EINVAL;
- if (!range->pfns)
- return -EINVAL;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- enum dma_data_direction dir = DMA_TO_DEVICE;
- struct page *page;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- /* If it is read and write then map bi-directionally. */
- if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
- dir = DMA_BIDIRECTIONAL;
-
- /*
- * See comments in function description on why it is
- * safe here to call set_page_dirty()
- */
- if (dirty)
- set_page_dirty(page);
- }
-
- /* Unmap and clear pfns/dma address */
- dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
- range->pfns[i] = range->values[HMM_PFN_NONE];
- /* FIXME see comments in hmm_range_dma_map() */
- daddrs[i] = 0;
- cpages++;
- }
-
- return cpages;
-}
-EXPORT_SYMBOL(hmm_range_dma_unmap);
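
The teardown half is just as small to open-code; a companion sketch under the same assumptions (my_unmap_range() is illustrative, and a zero dma_addr_t marks a slot that was never mapped, mirroring the FIXME in the removed code):

    /*
     * Hypothetical driver-side counterpart to the removed
     * hmm_range_dma_unmap(): unmap every slot that my_map_range() mapped.
     */
    static void my_unmap_range(struct hmm_range *range, struct device *dev,
                               dma_addr_t *daddrs)
    {
            unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;

            for (i = 0; i < npages; i++) {
                    if (!daddrs[i])
                            continue;       /* never mapped, see my_map_range() */
                    dma_unmap_page(dev, daddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
                    daddrs[i] = 0;
            }
    }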