author    Leon Romanovsky <leonro@nvidia.com>           2025-09-09 16:27:41 +0300
committer Marek Szyprowski <m.szyprowski@samsung.com>   2025-09-12 00:18:21 +0200
commit    ec818caebc0809722f47004db9f74b7ab355c583
tree      5f78426de95c8db2cf4bb99c8d916ba40a6c9a56
parent    e1d69da24fb8ee02e13dcbc281f510f01332a7f8
mm/hmm: properly take MMIO path
In case a peer-to-peer transaction traverses the host bridge, the IOMMU
mapping needs the IOMMU_MMIO flag together with skipping the CPU sync.
The latter was handled by the provided DMA_ATTR_SKIP_CPU_SYNC flag, but
the IOMMU flag was missed due to the assumption that such memory can be
treated as regular memory. Reuse the newly introduced DMA attribute to
properly take the MMIO path.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/998251caf3f9d1a3f6f8205f1f494c707fb4d8fa.1757423202.git.leonro@nvidia.com
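For context, below is a minimal C sketch of the map-side pattern this patch establishes. The helper name map_p2p_pfn and the bare size parameter are illustrative, not part of the patch; the surrounding setup and error handling of hmm_dma_map_pfn() are elided.

#include <linux/dma-mapping.h>
#include <linux/hmm.h>
#include <linux/pci-p2pdma.h>

/*
 * Hypothetical helper illustrating the map-side flow after this patch:
 * P2P pages routed through the host bridge are mapped as MMIO, which
 * also implies skipping the CPU cache sync (previously requested with
 * DMA_ATTR_SKIP_CPU_SYNC alone, leaving the IOMMU_MMIO flag unset).
 */
static dma_addr_t map_p2p_pfn(struct device *dev, phys_addr_t paddr,
			      size_t size, unsigned long *pfn,
			      enum pci_p2pdma_map_type map_type)
{
	unsigned long attrs = 0;

	switch (map_type) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		attrs |= DMA_ATTR_MMIO;	/* MMIO IOMMU mapping + no CPU sync */
		*pfn |= HMM_PFN_P2PDMA;
		break;
	default:
		break;
	}

	return dma_map_phys(dev, paddr, size, DMA_BIDIRECTIONAL, attrs);
}

Folding the sync skip into DMA_ATTR_MMIO means a caller can no longer request skipping the CPU sync without also getting the MMIO IOMMU flag, which is exactly the pairing the host-bridge P2P case requires.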
 mm/hmm.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 015ab243f081..6556c0e074ba 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -746,7 +746,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 		case PCI_P2PDMA_MAP_NONE:
 			break;
 		case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
-			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+			attrs |= DMA_ATTR_MMIO;
 			pfns[idx] |= HMM_PFN_P2PDMA;
 			break;
 		case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -776,7 +776,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 			goto error;
 
 	dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
-				DMA_BIDIRECTIONAL, 0);
+				DMA_BIDIRECTIONAL, attrs);
 	if (dma_mapping_error(dev, dma_addr))
 		goto error;
 
@@ -811,16 +811,17 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
 	if ((pfns[idx] & valid_dma) != valid_dma)
 		return false;
 
+	if (pfns[idx] & HMM_PFN_P2PDMA)
+		attrs |= DMA_ATTR_MMIO;
+
 	if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
 		; /* no need to unmap bus address P2P mappings */
-	else if (dma_use_iova(state)) {
-		if (pfns[idx] & HMM_PFN_P2PDMA)
-			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	else if (dma_use_iova(state))
 		dma_iova_unlink(dev, state, idx * map->dma_entry_size,
 				map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
-	} else if (dma_need_unmap(dev))
+	else if (dma_need_unmap(dev))
 		dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
-			       DMA_BIDIRECTIONAL, 0);
+			       DMA_BIDIRECTIONAL, attrs);
 	pfns[idx] &=
 		~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);