Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r-- | drivers/iommu/intel/iommu.c | 196
1 file changed, 137 insertions, 59 deletions
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 2e9811bf2a4e..3917279afca7 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -854,7 +854,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 			if (domain->use_first_level)
-				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+				pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
 			tmp = 0ULL;
 			if (!try_cmpxchg64(&pte->val, &tmp, pteval))
@@ -1359,21 +1359,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
 	}
 }
 
-static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
-				    u64 addr, unsigned int mask)
-{
-	u16 sid, qdep;
-
-	if (!info || !info->ats_enabled)
-		return;
-
-	sid = info->bus << 8 | info->devfn;
-	qdep = info->ats_qdep;
-	qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
-			   qdep, addr, mask);
-	quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
-}
-
 static void intel_flush_iotlb_all(struct iommu_domain *domain)
 {
 	cache_tag_flush_all(to_dmar_domain(domain));
@@ -1872,7 +1857,7 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
 	attr |= DMA_FL_PTE_PRESENT;
 	if (domain->use_first_level) {
-		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+		attr |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 		if (prot & DMA_PTE_WRITE)
 			attr |= DMA_FL_PTE_DIRTY;
 	}
@@ -1959,7 +1944,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 {
 	struct intel_iommu *iommu = info->iommu;
 	struct context_entry *context;
-	u16 did_old;
 
 	spin_lock(&iommu->lock);
 	context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1968,24 +1952,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
 		return;
 	}
 
-	did_old = context_domain_id(context);
-
 	context_clear_entry(context);
 	__iommu_flush_cache(iommu, context, sizeof(*context));
 	spin_unlock(&iommu->lock);
-	iommu->flush.flush_context(iommu,
-				   did_old,
-				   (((u16)bus) << 8) | devfn,
-				   DMA_CCMD_MASK_NOBIT,
-				   DMA_CCMD_DEVICE_INVL);
-
-	iommu->flush.flush_iotlb(iommu,
-				 did_old,
-				 0,
-				 0,
-				 DMA_TLB_DSI_FLUSH);
-
-	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
+	intel_context_flush_present(info, context, true);
 }
 
 static int domain_setup_first_level(struct intel_iommu *iommu,
@@ -2071,7 +2041,7 @@ static int __init si_domain_init(int hw)
 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 		ret = iommu_domain_identity_map(si_domain,
 				mm_to_dma_pfn_start(start_pfn),
-				mm_to_dma_pfn_end(end_pfn));
+				mm_to_dma_pfn_end(end_pfn-1));
 		if (ret)
 			return ret;
 	}
@@ -2177,17 +2147,6 @@ static bool device_rmrr_is_relaxable(struct device *dev)
 	return false;
 }
 
-/*
- * Return the required default domain type for a specific device.
- *
- * @dev: the device in query
- * @startup: true if this is during early boot
- *
- * Returns:
- *  - IOMMU_DOMAIN_DMA: device requires a dynamic mapping domain
- *  - IOMMU_DOMAIN_IDENTITY: device requires an identical mapping domain
- *  - 0: both identity and dynamic domains work for this device
- */
 static int device_def_domain_type(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -3633,6 +3592,79 @@ static struct iommu_domain blocking_domain = {
 	}
 };
 
+static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
+{
+	if (!intel_iommu_superpage)
+		return 0;
+
+	if (first_stage)
+		return cap_fl1gp_support(iommu->cap) ? 2 : 1;
+
+	return fls(cap_super_page_val(iommu->cap));
+}
+
+static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
+{
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+	struct intel_iommu *iommu = info->iommu;
+	struct dmar_domain *domain;
+	int addr_width;
+
+	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	if (!domain)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&domain->devices);
+	INIT_LIST_HEAD(&domain->dev_pasids);
+	INIT_LIST_HEAD(&domain->cache_tags);
+	spin_lock_init(&domain->lock);
+	spin_lock_init(&domain->cache_lock);
+	xa_init(&domain->iommu_array);
+
+	domain->nid = dev_to_node(dev);
+	domain->has_iotlb_device = info->ats_enabled;
+	domain->use_first_level = first_stage;
+
+	/* calculate the address width */
+	addr_width = agaw_to_width(iommu->agaw);
+	if (addr_width > cap_mgaw(iommu->cap))
+		addr_width = cap_mgaw(iommu->cap);
+	domain->gaw = addr_width;
+	domain->agaw = iommu->agaw;
+	domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
+
+	/* iommu memory access coherency */
+	domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
+
+	/* pagesize bitmap */
+	domain->domain.pgsize_bitmap = SZ_4K;
+	domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
+	domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
+
+	/*
+	 * IOVA aperture: First-level translation restricts the input-address
+	 * to a canonical address (i.e., address bits 63:N have the same value
+	 * as address bit [N-1], where N is 48-bits with 4-level paging and
+	 * 57-bits with 5-level paging). Hence, skip bit [N-1].
+	 */
+	domain->domain.geometry.force_aperture = true;
+	domain->domain.geometry.aperture_start = 0;
+	if (first_stage)
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
+	else
+		domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+	/* always allocate the top pgd */
+	domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL);
+	if (!domain->pgd) {
+		kfree(domain);
+		return ERR_PTR(-ENOMEM);
+	}
+	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+
+	return domain;
+}
+
 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
 	struct dmar_domain *dmar_domain;
@@ -3695,15 +3727,14 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	if (user_data || (dirty_tracking && !ssads_supported(iommu)))
 		return ERR_PTR(-EOPNOTSUPP);
 
-	/*
-	 * domain_alloc_user op needs to fully initialize a domain before
-	 * return, so uses iommu_domain_alloc() here for simple.
-	 */
-	domain = iommu_domain_alloc(dev->bus);
-	if (!domain)
-		return ERR_PTR(-ENOMEM);
-
-	dmar_domain = to_dmar_domain(domain);
+	/* Do not use first stage for user domain translation. */
+	dmar_domain = paging_domain_alloc(dev, false);
+	if (IS_ERR(dmar_domain))
+		return ERR_CAST(dmar_domain);
+	domain = &dmar_domain->domain;
+	domain->type = IOMMU_DOMAIN_UNMANAGED;
+	domain->owner = &intel_iommu_ops;
+	domain->ops = intel_iommu_ops.default_domain_ops;
 
 	if (nested_parent) {
 		dmar_domain->nested_parent = true;
@@ -4213,6 +4244,37 @@ static int intel_iommu_enable_sva(struct device *dev)
 	return 0;
 }
 
+static int context_flip_pri(struct device_domain_info *info, bool enable)
+{
+	struct intel_iommu *iommu = info->iommu;
+	u8 bus = info->bus, devfn = info->devfn;
+	struct context_entry *context;
+
+	spin_lock(&iommu->lock);
+	if (context_copied(iommu, bus, devfn)) {
+		spin_unlock(&iommu->lock);
+		return -EINVAL;
+	}
+
+	context = iommu_context_addr(iommu, bus, devfn, false);
+	if (!context || !context_present(context)) {
+		spin_unlock(&iommu->lock);
+		return -ENODEV;
+	}
+
+	if (enable)
+		context_set_sm_pre(context);
+	else
+		context_clear_sm_pre(context);
+
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(context, sizeof(*context));
+	intel_context_flush_present(info, context, true);
+	spin_unlock(&iommu->lock);
+
+	return 0;
+}
+
 static int intel_iommu_enable_iopf(struct device *dev)
 {
 	struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
@@ -4242,15 +4304,23 @@ static int intel_iommu_enable_iopf(struct device *dev)
 	if (ret)
 		return ret;
 
+	ret = context_flip_pri(info, true);
+	if (ret)
+		goto err_remove_device;
+
 	ret = pci_enable_pri(pdev, PRQ_DEPTH);
-	if (ret) {
-		iopf_queue_remove_device(iommu->iopf_queue, dev);
-		return ret;
-	}
+	if (ret)
+		goto err_clear_pri;
 
 	info->pri_enabled = 1;
 
 	return 0;
+err_clear_pri:
+	context_flip_pri(info, false);
+err_remove_device:
+	iopf_queue_remove_device(iommu->iopf_queue, dev);
+
+	return ret;
 }
 
 static int intel_iommu_disable_iopf(struct device *dev)
 {
 	struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4261,6 +4331,15 @@ static int intel_iommu_disable_iopf(struct device *dev)
 	if (!info->pri_enabled)
 		return -EINVAL;
 
+	/* Disable new PRI reception: */
+	context_flip_pri(info, false);
+
+	/*
+	 * Remove device from fault queue and acknowledge all outstanding
+	 * PRQs to the device:
+	 */
+	iopf_queue_remove_device(iommu->iopf_queue, dev);
+
 	/*
 	 * PCIe spec states that by clearing PRI enable bit, the Page
 	 * Request Interface will not issue new page requests, but has
@@ -4271,7 +4350,6 @@ static int intel_iommu_disable_iopf(struct device *dev)
 	 */
 	pci_disable_pri(to_pci_dev(dev));
	info->pri_enabled = 0;
-	iopf_queue_remove_device(iommu->iopf_queue, dev);
 
 	return 0;
 }
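A note on the si_domain_init() hunk: for_each_mem_pfn_range() reports end_pfn as an exclusive bound, while iommu_domain_identity_map() takes an inclusive last page frame, so passing end_pfn unadjusted mapped one page beyond each memory range. A minimal sketch of the arithmetic, assuming the mm_to_dma_pfn_*() helpers keep the definitions used in this file and 4 KiB pages on both sides (so the mm-to-DMA shift is zero):

#include <stdio.h>

/* Assumed to match drivers/iommu/intel/iommu.c with 4 KiB kernel pages. */
#define PAGE_SHIFT     12
#define VTD_PAGE_SHIFT 12

#define mm_to_dma_pfn_start(mm_pfn) ((mm_pfn) << (PAGE_SHIFT - VTD_PAGE_SHIFT))
#define mm_to_dma_pfn_end(mm_pfn)   ((((mm_pfn) + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1)

int main(void)
{
	/* A memblock range covering pfns 0x100..0x1ff (end_pfn is exclusive). */
	unsigned long start_pfn = 0x100, end_pfn = 0x200;

	/* Old code: converting the exclusive bound maps one page too many. */
	printf("old: %#lx..%#lx\n", mm_to_dma_pfn_start(start_pfn),
	       mm_to_dma_pfn_end(end_pfn));     /* 0x100..0x200 */

	/* New code: make the bound inclusive first, then convert. */
	printf("new: %#lx..%#lx\n", mm_to_dma_pfn_start(start_pfn),
	       mm_to_dma_pfn_end(end_pfn - 1)); /* 0x100..0x1ff */
	return 0;
}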
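On the new iommu_superpage_capability() helper: it collapses the hardware capability into a level count (0 = 4 KiB only, 1 = also 2 MiB, 2 = also 1 GiB, with the first-stage case gated by cap_fl1gp_support()), which paging_domain_alloc() then widens into the domain's pgsize bitmap through domain_super_pgsize_bitmap(). A standalone sketch of that level-to-bitmap convention, with the SZ_* constants spelled out and the mapping assumed from the helper's contract rather than copied from the driver:

#include <stdio.h>

#define SZ_4K 0x00001000UL
#define SZ_2M 0x00200000UL
#define SZ_1G 0x40000000UL

/*
 * Assumed convention of domain_super_pgsize_bitmap(): superpage level 1
 * contributes the 2 MiB size, level 2 contributes 2 MiB and 1 GiB.
 */
static unsigned long super_pgsize_bitmap(int superpage_level)
{
	if (superpage_level == 1)
		return SZ_2M;
	if (superpage_level == 2)
		return SZ_2M | SZ_1G;
	return 0;
}

int main(void)
{
	/* 4 KiB is always supported; superpages only widen the bitmap. */
	for (int level = 0; level <= 2; level++)
		printf("level %d -> pgsize bitmap %#lx\n", level,
		       SZ_4K | super_pgsize_bitmap(level));
	return 0;
}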
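The aperture split in paging_domain_alloc() encodes the canonical-address rule quoted in its comment: with first-stage translation, bit [gaw-1] acts as the sign bit of the input address, so a zero-based IOVA aperture must stop one bit short of the full width. A worked example for a 4-level (gaw = 48) configuration, assuming __DOMAIN_MAX_ADDR(gaw) expands to ((1ULL << gaw) - 1) as elsewhere in this driver:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match the driver's definition. */
#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

int main(void)
{
	int gaw = 48; /* 4-level paging */

	/* Second stage: the full 48-bit input-address space is usable. */
	printf("second-stage aperture end: %#llx\n",
	       (unsigned long long)__DOMAIN_MAX_ADDR(gaw));     /* 0xffffffffffff */

	/*
	 * First stage: bits 63:47 must equal bit 47, so a zero-based
	 * aperture has to keep bit 47 clear and ends at 2^47 - 1.
	 */
	printf("first-stage aperture end:  %#llx\n",
	       (unsigned long long)__DOMAIN_MAX_ADDR(gaw - 1)); /* 0x7fffffffffff */
	return 0;
}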
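Finally, the reworked intel_iommu_enable_iopf() follows the usual kernel unwind idiom: each facility enabled so far gets an error label that undoes it in reverse order (the PRE bit in the context entry, then the fault-queue registration), and intel_iommu_disable_iopf() now tears down in that same reverse order before clearing PRI at the PCI level. A generic sketch of the pattern, with hypothetical step_a/step_b/step_c and undo_* helpers standing in for the driver calls:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for iopf_queue_add_device(), context_flip_pri()
 * and pci_enable_pri(); each returns 0 on success. */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -EIO; } /* pretend the last step fails */

static void undo_a(void) { } /* mirrors iopf_queue_remove_device() */
static void undo_b(void) { } /* mirrors context_flip_pri(info, false) */

static int enable_all(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto err_undo_a;

	ret = step_c();
	if (ret)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return ret;
}

int main(void)
{
	/* step_c fails, so b and a are rolled back and -EIO propagates. */
	printf("enable_all() = %d\n", enable_all());
	return 0;
}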