author     Lu Baolu <baolu.lu@linux.intel.com>    2020-05-06 09:59:45 +0800
committer  Joerg Roedel <jroedel@suse.de>         2020-05-13 12:02:01 +0200
commit     327d5b2fee91c404a3956c324193892cf2cc9528
tree       6adf859f40b1d88ada34def2498de50f6045a664 /drivers/iommu
parent     ec9b40cffdb68c4ea1ebdcd1648ed6ce15c4449e
iommu/vt-d: Allow 32bit devices to use DMA domain
Currently, if a 32bit device initially uses an identity domain, the Intel IOMMU driver forcibly converts it to a DMA domain when its address capability is not sufficient for the whole system memory. The motivation was to avoid the overhead of possible bounce buffering. Unfortunately, this improvement has led to many problems.

For example, some 32bit devices are required to use an identity domain; forcing them into a DMA domain leaves the device unable to work. On the other hand, VMD sub-devices share a domain, but each sub-device might have a different address capability. Blindly forcing one VMD sub-device to use a DMA domain impacts the operation of the other sub-devices without any notification. Furthermore, PCI aliased devices (a PCI bridge and all devices beneath it, VMD devices, and various devices quirked with pci_add_dma_alias()) must use the same domain. Forcing one such device to switch to a DMA domain at runtime causes in-flight DMAs of the other devices to abort or to target other memory, which might cause undefined system behavior.

With the last private domain usage in iommu_need_mapping() removed, all private domain helpers are also cleaned up in this patch. Otherwise, the compiler would complain that some functions are defined but not used.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Daniel Drake <drake@endlessm.com>
Reviewed-by: Jon Derrick <jonathan.derrick@intel.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Cc: Daniel Drake <drake@endlessm.com>
Cc: Derrick Jonathan <jonathan.derrick@intel.com>
Cc: Jerry Snitselaar <jsnitsel@redhat.com>
Link: https://lore.kernel.org/r/20200506015947.28662-2-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
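For orientation before the diff: the heuristic being removed boiled down to a DMA-mask check, sketched below. This is distilled from the deleted iommu_need_mapping() body visible in the diff; the helper name identity_mapping_sufficient is invented for illustration and is not a kernel API.

#include <linux/device.h>
#include <linux/dma-direct.h>	/* dma_direct_get_required_mask() */

/*
 * Sketch of the removed heuristic. The old code forced an
 * identity-mapped device into a DMA domain whenever this returned
 * false; after this patch the device simply keeps its identity
 * domain.
 */
static bool identity_mapping_sufficient(struct device *dev)
{
	u64 dma_mask = *dev->dma_mask;

	/* Honor the more restrictive of the streaming/coherent masks. */
	if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
		dma_mask = dev->coherent_dma_mask;

	/* If the mask covers all DMA-able memory, identity mapping is safe. */
	return dma_mask >= dma_direct_get_required_mask(dev);
}

A device that fails this check can instead fall back to swiotlb bounce buffering; note that the hunk near the end of the diff also removes the shortcut that disabled swiotlb.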
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/intel-iommu.c  291
1 file changed, 1 insertion(+), 290 deletions(-)
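One more piece of context for the aliasing argument in the commit message: the driver resolved a device to the requester ID the IOMMU actually sees by walking its DMA aliases. The sketch below is adapted from the get_last_alias() helper deleted in the diff; the device_requester_alias() wrapper name is invented for illustration.

#include <linux/pci.h>

/* Callback for pci_for_each_dma_alias(): record each alias seen. */
static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
{
	*(u16 *)opaque = alias;
	return 0;	/* keep walking; the last alias reported wins */
}

static u16 device_requester_alias(struct pci_dev *pdev)
{
	u16 dma_alias = 0;

	/* Walk pdev and every bridge above it that aliases its traffic. */
	pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
	return dma_alias;
}

Every device that resolves to the same alias presents the same requester ID to the IOMMU, so such devices necessarily share one domain; switching any one of them at runtime would pull the translation tables out from under the others' in-flight DMA, which is exactly the failure mode the commit message describes.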
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 34e08fa2ce3a..16ba7add0f72 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -355,11 +355,6 @@ static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
-static int domain_detach_iommu(struct dmar_domain *domain,
- struct intel_iommu *iommu);
-static bool device_is_rmrr_locked(struct device *dev);
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -1930,65 +1925,6 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
-static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
- int guest_width)
-{
- int adjust_width, agaw;
- unsigned long sagaw;
- int ret;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
-
- if (!intel_iommu_strict) {
- ret = init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova, iova_entry_free);
- if (ret)
- pr_info("iova flush queue initialization failed\n");
- }
-
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- if (guest_width > cap_mgaw(iommu->cap))
- guest_width = cap_mgaw(iommu->cap);
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- agaw = width_to_agaw(adjust_width);
- sagaw = cap_sagaw(iommu->cap);
- if (!test_bit(agaw, &sagaw)) {
- /* hardware doesn't support it, choose a bigger one */
- pr_debug("Hardware doesn't support agaw %d\n", agaw);
- agaw = find_next_bit(&sagaw, 5, agaw);
- if (agaw >= 5)
- return -ENODEV;
- }
- domain->agaw = agaw;
-
- if (ecap_coherent(iommu->ecap))
- domain->iommu_coherency = 1;
- else
- domain->iommu_coherency = 0;
-
- if (ecap_sc_support(iommu->ecap))
- domain->iommu_snooping = 1;
- else
- domain->iommu_snooping = 0;
-
- if (intel_iommu_superpage)
- domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
- else
- domain->iommu_superpage = 0;
-
- domain->nid = iommu->node;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static void domain_exit(struct dmar_domain *domain)
{
@@ -2704,94 +2640,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
return domain;
}
-static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
-{
- *(u16 *)opaque = alias;
- return 0;
-}
-
-static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
-{
- struct device_domain_info *info;
- struct dmar_domain *domain = NULL;
- struct intel_iommu *iommu;
- u16 dma_alias;
- unsigned long flags;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
- PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff);
- if (info) {
- iommu = info->iommu;
- domain = info->domain;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- /* DMA alias already has a domain, use it */
- if (info)
- goto out;
- }
-
- /* Allocate and initialize new domain for the device */
- domain = alloc_domain(0);
- if (!domain)
- return NULL;
- if (domain_init(domain, iommu, gaw)) {
- domain_exit(domain);
- return NULL;
- }
-
-out:
- return domain;
-}
-
-static struct dmar_domain *set_domain_for_dev(struct device *dev,
- struct dmar_domain *domain)
-{
- struct intel_iommu *iommu;
- struct dmar_domain *tmp;
- u16 req_id, dma_alias;
- u8 bus, devfn;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return NULL;
-
- req_id = ((u16)bus << 8) | devfn;
-
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
-
- /* register PCI DMA alias device */
- if (req_id != dma_alias) {
- tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
- dma_alias & 0xff, NULL, domain);
-
- if (!tmp || tmp != domain)
- return tmp;
- }
- }
-
- tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
- if (!tmp || tmp != domain)
- return tmp;
-
- return domain;
-}
-
static int iommu_domain_identity_map(struct dmar_domain *domain,
unsigned long long start,
unsigned long long end)
@@ -2817,45 +2665,6 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
DMA_PTE_READ|DMA_PTE_WRITE);
}
-static int domain_prepare_identity_map(struct device *dev,
- struct dmar_domain *domain,
- unsigned long long start,
- unsigned long long end)
-{
- /* For _hardware_ passthrough, don't bother. But for software
- passthrough, we do it anyway -- it may indicate a memory
- range which is reserved in E820, so which didn't get set
- up to start with in si_domain */
- if (domain == si_domain && hw_pass_through) {
- dev_warn(dev, "Ignoring identity map for HW passthrough [0x%Lx - 0x%Lx]\n",
- start, end);
- return 0;
- }
-
- dev_info(dev, "Setting identity map [0x%Lx - 0x%Lx]\n", start, end);
-
- if (end < start) {
- WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- if (end >> agaw_to_width(domain->agaw)) {
- WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
- "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
- agaw_to_width(domain->agaw),
- dmi_get_system_info(DMI_BIOS_VENDOR),
- dmi_get_system_info(DMI_BIOS_VERSION),
- dmi_get_system_info(DMI_PRODUCT_VERSION));
- return -EIO;
- }
-
- return iommu_domain_identity_map(domain, start, end);
-}
-
static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int __init si_domain_init(int hw)
@@ -3531,98 +3340,16 @@ static unsigned long intel_alloc_iova(struct device *dev,
return iova_pfn;
}
-static struct dmar_domain *get_private_domain_for_dev(struct device *dev)
-{
- struct dmar_domain *domain, *tmp;
- struct dmar_rmrr_unit *rmrr;
- struct device *i_dev;
- int i, ret;
-
- /* Device shouldn't be attached by any domains. */
- domain = find_domain(dev);
- if (domain)
- return NULL;
-
- domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
- if (!domain)
- goto out;
-
- /* We have a new domain - setup possible RMRRs for the device */
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
- i, i_dev) {
- if (i_dev != dev)
- continue;
-
- ret = domain_prepare_identity_map(dev, domain,
- rmrr->base_address,
- rmrr->end_address);
- if (ret)
- dev_err(dev, "Mapping reserved region failed\n");
- }
- }
- rcu_read_unlock();
-
- tmp = set_domain_for_dev(dev, domain);
- if (!tmp || domain != tmp) {
- domain_exit(domain);
- domain = tmp;
- }
-
-out:
- if (!domain)
- dev_err(dev, "Allocating domain failed\n");
- else
- domain->domain.type = IOMMU_DOMAIN_DMA;
-
- return domain;
-}
-
/* Check if the dev needs to go through non-identity map and unmap process.*/
static bool iommu_need_mapping(struct device *dev)
{
- int ret;
-
if (iommu_dummy(dev))
return false;
if (unlikely(attach_deferred(dev)))
do_deferred_attach(dev);
- ret = identity_mapping(dev);
- if (ret) {
- u64 dma_mask = *dev->dma_mask;
-
- if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
- dma_mask = dev->coherent_dma_mask;
-
- if (dma_mask >= dma_direct_get_required_mask(dev))
- return false;
-
- /*
- * 32 bit DMA is removed from si_domain and fall back to
- * non-identity mapping.
- */
- dmar_remove_one_dev_info(dev);
- ret = iommu_request_dma_domain_for_dev(dev);
- if (ret) {
- struct iommu_domain *domain;
- struct dmar_domain *dmar_domain;
-
- domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- dmar_domain = to_dmar_domain(domain);
- dmar_domain->flags |= DOMAIN_FLAG_LOSE_CHILDREN;
- }
- dmar_remove_one_dev_info(dev);
- get_private_domain_for_dev(dev);
- }
-
- dev_info(dev, "32bit DMA uses non-identity mapping\n");
- }
-
- return true;
+ return !identity_mapping(dev);
}
static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
@@ -5186,16 +4913,6 @@ int __init intel_iommu_init(void)
}
up_write(&dmar_global_lock);
-#if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB)
- /*
- * If the system has no untrusted device or the user has decided
- * to disable the bounce page mechanisms, we don't need swiotlb.
- * Mark this and the pre-allocated bounce pages will be released
- * later.
- */
- if (!has_untrusted_dev() || intel_no_bounce)
- swiotlb = 0;
-#endif
dma_ops = &intel_dma_ops;
init_iommu_pm_ops();
@@ -5296,12 +5013,6 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
- /* free the private domain */
- if (domain->flags & DOMAIN_FLAG_LOSE_CHILDREN &&
- !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) &&
- list_empty(&domain->devices))
- domain_exit(info->domain);
-
free_devinfo_mem(info);
}