Diffstat (limited to 'arch/x86/kernel/amd_gart_64.c')
-rw-r--r--	arch/x86/kernel/amd_gart_64.c	44
1 file changed, 23 insertions, 21 deletions
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index ed837383de5c..e8000a56732e 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -38,11 +38,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/set_memory.h>
-#include <asm/swiotlb.h>
 #include <asm/dma.h>
-#include <asm/amd_nb.h>
+#include <asm/amd/nb.h>
 #include <asm/x86_init.h>
-#include <asm/iommu_table.h>
 
 static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
 static unsigned long iommu_size;	/* size of remapping area bytes */
@@ -55,7 +53,7 @@ static u32 *iommu_gatt_base;		/* Remapping table */
  * of only flushing when an mapping is reused. With it true the GART is
  * flushed for every mapping. Problem is that doing the lazy flush seems
  * to trigger bugs with some popular PCI cards, in particular 3ware (but
- * has been also also seen with Qlogic at least).
+ * has been also seen with Qlogic at least).
  */
 static int iommu_fullflush = 1;
 
@@ -224,13 +222,14 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 }
 
 /* Map a single area into the IOMMU */
-static dma_addr_t gart_map_page(struct device *dev, struct page *page,
-				unsigned long offset, size_t size,
-				enum dma_data_direction dir,
+static dma_addr_t gart_map_phys(struct device *dev, phys_addr_t paddr,
+				size_t size, enum dma_data_direction dir,
 				unsigned long attrs)
 {
 	unsigned long bus;
-	phys_addr_t paddr = page_to_phys(page) + offset;
+
+	if (unlikely(attrs & DMA_ATTR_MMIO))
+		return DMA_MAPPING_ERROR;
 
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
@@ -244,7 +243,7 @@ static dma_addr_t gart_map_page(struct device *dev, struct page *page,
 /*
  * Free a DMA mapping.
  */
-static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
+static void gart_unmap_phys(struct device *dev, dma_addr_t dma_addr,
 			    size_t size, enum dma_data_direction dir,
 			    unsigned long attrs)
 {
@@ -284,7 +283,7 @@ static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	for_each_sg(sg, s, nents, i) {
 		if (!s->dma_length || !s->length)
 			break;
-		gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
+		gart_unmap_phys(dev, s->dma_address, s->dma_length, dir, 0);
 	}
 }
 
@@ -489,7 +488,7 @@ static void gart_free_coherent(struct device *dev, size_t size, void *vaddr,
 			       dma_addr_t dma_addr, unsigned long attrs)
 {
-	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
+	gart_unmap_phys(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
 	dma_direct_free(dev, size, vaddr, dma_addr, attrs);
 }
 
@@ -506,7 +505,7 @@ static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
 	}
 
 	a = aper + iommu_size;
-	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;
+	iommu_size -= round_up(a, PMD_SIZE) - a;
 
 	if (iommu_size < 64*1024*1024) {
 		pr_warn("PCI-DMA: Warning: Small IOMMU %luMB."
@@ -593,7 +592,7 @@ static void gart_fixup_northbridges(void)
 	}
 }
 
-static void gart_resume(void)
+static void gart_resume(void *data)
 {
 	pr_info("PCI-DMA: Resuming GART IOMMU\n");
 
@@ -602,11 +601,15 @@ static void gart_resume(void)
 	enable_gart_translations();
 }
 
-static struct syscore_ops gart_syscore_ops = {
+static const struct syscore_ops gart_syscore_ops = {
 	.resume		= gart_resume,
 };
 
+static struct syscore gart_syscore = {
+	.ops		= &gart_syscore_ops,
+};
+
 /*
  * Private Northbridge GATT initialization in case we cannot use the
  * AGP driver for some reason.
  */
@@ -652,7 +655,7 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
 
 	agp_gatt_table = gatt;
 
-	register_syscore_ops(&gart_syscore_ops);
+	register_syscore(&gart_syscore);
 
 	flush_gart();
 
@@ -670,15 +673,15 @@ static __init int init_amd_gatt(struct agp_kern_info *info)
 static const struct dma_map_ops gart_dma_ops = {
 	.map_sg				= gart_map_sg,
 	.unmap_sg			= gart_unmap_sg,
-	.map_page			= gart_map_page,
-	.unmap_page			= gart_unmap_page,
+	.map_phys			= gart_map_phys,
+	.unmap_phys			= gart_unmap_phys,
 	.alloc				= gart_alloc_coherent,
 	.free				= gart_free_coherent,
 	.mmap				= dma_common_mmap,
 	.get_sgtable			= dma_common_get_sgtable,
 	.dma_supported			= dma_direct_supported,
 	.get_required_mask		= dma_direct_get_required_mask,
-	.alloc_pages			= dma_direct_alloc_pages,
+	.alloc_pages_op			= dma_direct_alloc_pages,
 	.free_pages			= dma_direct_free_pages,
 };
 
@@ -778,7 +781,7 @@ int __init gart_iommu_init(void)
 			iommu_size >> PAGE_SHIFT);
 	/*
 	 * Tricky. The GART table remaps the physical memory range,
-	 * so the CPU wont notice potential aliases and if the memory
+	 * so the CPU won't notice potential aliases and if the memory
 	 * is remapped to UC later on, we might surprise the PCI devices
 	 * with a stray writeout of a cacheline. So play it sure and
 	 * do an explicit, full-scale wbinvd() _after_ having marked all
@@ -808,7 +811,7 @@ int __init gart_iommu_init(void)
 	flush_gart();
 	dma_ops = &gart_dma_ops;
 	x86_platform.iommu_shutdown = gart_iommu_shutdown;
-	swiotlb = 0;
+	x86_swiotlb_enable = false;
 
 	return 0;
 }
@@ -842,4 +845,3 @@ void __init gart_parse_options(char *p)
 		}
 	}
 }
-IOMMU_INIT_POST(gart_iommu_hole_init);
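Taken together, the mapping-side hunks above replace the page-based ->map_page()/->unmap_page() callbacks with the physical-address-based ->map_phys()/->unmap_phys(), and make the GART driver reject DMA_ATTR_MMIO mappings (MMIO physical ranges, e.g. PCI peer-to-peer addresses) with DMA_MAPPING_ERROR, since the GART aperture can only remap system RAM. Below is a minimal driver-side sketch of how this path would be exercised; the helper names are illustrative only, while dma_map_single(), dma_unmap_single() and dma_mapping_error() are the regular DMA API:

#include <linux/dma-mapping.h>

/*
 * Illustrative helper, not part of this patch: map a kernel buffer for
 * device reads.  dma_map_single() resolves the buffer to a physical
 * address, and with this patch the core DMA layer is assumed to hand
 * that address to gart_map_phys() directly, instead of the old
 * page + offset pair passed to ->map_page().
 */
static dma_addr_t example_map_buf(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* gart_map_phys() reports failure via DMA_MAPPING_ERROR */
	if (dma_mapping_error(dev, dma))
		return DMA_MAPPING_ERROR;

	return dma;
}

/*
 * The matching teardown ends up in gart_unmap_phys(), which frees the
 * GART entries that were allocated for the buffer.
 */
static void example_unmap_buf(struct device *dev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
}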
