Diffstat (limited to 'drivers/iommu/intel/pasid.c')
-rw-r--r--  drivers/iommu/intel/pasid.c  555
1 file changed, 418 insertions(+), 137 deletions(-)
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 11f0b856d74c..ac67a056b6c8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -20,6 +20,7 @@
#include "iommu.h"
#include "pasid.h"
+#include "../iommu-pages.h"
/*
* Intel IOMMU system wide PASID name space:
@@ -38,7 +39,7 @@ int intel_pasid_alloc_table(struct device *dev)
{
struct device_domain_info *info;
struct pasid_table *pasid_table;
- struct page *pages;
+ struct pasid_dir_entry *dir;
u32 max_pasid = 0;
int order, size;
@@ -59,15 +60,14 @@ int intel_pasid_alloc_table(struct device *dev)
size = max_pasid >> (PASID_PDE_SHIFT - 3);
order = size ? get_order(size) : 0;
- pages = alloc_pages_node(info->iommu->node,
- GFP_KERNEL | __GFP_ZERO, order);
- if (!pages) {
+ dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+ 1 << (order + PAGE_SHIFT));
+ if (!dir) {
kfree(pasid_table);
return -ENOMEM;
}
- pasid_table->table = page_address(pages);
- pasid_table->order = order;
+ pasid_table->table = dir;
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
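
As an illustration of the sizing math above (not part of the patch): each PASID-directory entry is 8 bytes and covers 2^PASID_PDE_SHIFT PASIDs, so with 4 KiB pages (PAGE_SHIFT = 12), PASID_PDE_SHIFT = 6 and a hypothetical 20-bit PASID width, the directory takes 128 KiB (order 5) and the rounded-up allocation covers exactly 1 << 20 PASIDs again. A standalone sketch of that arithmetic:

/* Illustration only: userspace model of the directory sizing above. */
#include <stdio.h>

/* Stand-in for the kernel's get_order(), assuming 4 KiB pages. */
static unsigned int order_of(unsigned long bytes)
{
        unsigned int order = 0;

        while ((4096UL << order) < bytes)
                order++;
        return order;
}

int main(void)
{
        unsigned long max_pasid = 1UL << 20;             /* hypothetical 20-bit PASID width */
        unsigned long size = max_pasid >> (6 - 3);       /* directory bytes: one 8-byte PDE per 64 PASIDs */
        unsigned int order = size ? order_of(size) : 0;  /* 5, i.e. a 32-page allocation */
        unsigned long covered = 1UL << (order + 12 + 3); /* PASIDs the allocation can cover */

        printf("size=%lu order=%u covered=%lu\n", size, order, covered);
        return 0;
}
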
@@ -97,10 +97,10 @@ void intel_pasid_free_table(struct device *dev)
max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
for (i = 0; i < max_pde; i++) {
table = get_pasid_table_from_pde(&dir[i]);
- free_pgtable_page(table);
+ iommu_free_pages(table);
}
- free_pages((unsigned long)pasid_table->table, pasid_table->order);
+ iommu_free_pages(pasid_table->table);
kfree(pasid_table);
}
@@ -146,7 +146,10 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
- entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
+ u64 tmp;
+
+ entries = iommu_alloc_pages_node_sz(info->iommu->node,
+ GFP_ATOMIC, SZ_4K);
if (!entries)
return NULL;
@@ -156,9 +159,10 @@ retry:
* clear. However, this entry might be populated by others
* while we are preparing it. Use theirs with a retry.
*/
- if (cmpxchg64(&dir[dir_index].val, 0ULL,
- (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
- free_pgtable_page(entries);
+ tmp = 0ULL;
+ if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
+ (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
+ iommu_free_pages(entries);
goto retry;
}
if (!ecap_coherent(info->iommu->ecap)) {
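
The publish step above is a common lock-free pattern: allocate a candidate table page, try to install it into the still-empty directory slot, and free it again if another CPU won the race. A standalone userspace model of the pattern (illustration only; atomic_compare_exchange_strong() stands in for try_cmpxchg64(), which likewise updates the expected value on failure):

/* Illustration only: userspace model of "allocate, try to publish, free on race". */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static void *get_or_install(_Atomic uint64_t *slot)
{
        uint64_t cur = atomic_load(slot);

        for (;;) {
                uint64_t expected = 0;
                void *mem;

                if (cur)                          /* slot already populated by someone else */
                        return (void *)(uintptr_t)cur;

                mem = calloc(1, 4096);            /* our candidate table page */
                if (!mem)
                        return NULL;

                if (atomic_compare_exchange_strong(slot, &expected,
                                                   (uint64_t)(uintptr_t)mem))
                        return mem;               /* we won the race */

                free(mem);                        /* lost the race: use theirs */
                cur = expected;
        }
}

int main(void)
{
        _Atomic uint64_t slot = 0;

        free(get_or_install(&slot));
        return 0;
}
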
@@ -217,7 +221,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
if (pci_dev_is_disconnected(to_pci_dev(dev)))
return;
- sid = info->bus << 8 | info->devfn;
+ sid = PCI_DEVID(info->bus, info->devfn);
qdep = info->ats_qdep;
pfsid = info->pfsid;
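
PCI_DEVID() above is the <linux/pci.h> helper packing the bus number into bits 15:8 and devfn into bits 7:0, i.e. exactly the open-coded expression it replaces. A minimal standalone check (illustration only):

#include <assert.h>
#include <stdint.h>

/* Mirrors PCI_DEVID(): bus in bits 15:8, devfn in bits 7:0. */
static inline uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
        return ((uint16_t)bus << 8) | devfn;
}

int main(void)
{
        /* bus 0x3a, device 2 function 0 (devfn 0x10) -> source-id 0x3a10 */
        assert(pci_devid(0x3a, 0x10) == 0x3a10);
        return 0;
}
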
@@ -241,8 +245,28 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
+ if (WARN_ON(!pte)) {
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ if (!pasid_pte_is_present(pte)) {
+ if (!pasid_pte_is_fault_disabled(pte)) {
+ WARN_ON(READ_ONCE(pte->val[0]) != 0);
+ spin_unlock(&iommu->lock);
+ return;
+ }
+
+ /*
+ * When a PASID is used for SVA by a device, it's possible
+ * that the pasid entry is non-present with the Fault
+ * Processing Disabled bit set. Clear the pasid entry and
+ * drain the PRQ for the PASID before return.
+ */
+ pasid_clear_entry(pte);
spin_unlock(&iommu->lock);
+ intel_iommu_drain_pasid_prq(dev, pasid);
+
return;
}
@@ -261,9 +285,9 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
else
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ if (!fault_ignore)
+ intel_iommu_drain_pasid_prq(dev, pasid);
}
/*
@@ -286,9 +310,68 @@ static void pasid_flush_caches(struct intel_iommu *iommu,
}
/*
+ * This function is supposed to be used after caller updates the fields
+ * except for the SSADE and P bit of a pasid table entry. It does the
+ * below:
+ * - Flush cacheline if needed
+ * - Flush the caches per Table 28 "Guidance to Software for Invalidations"
+ * of VT-d spec 5.0.
+ */
+static void intel_pasid_flush_present(struct intel_iommu *iommu,
+ struct device *dev,
+ u32 pasid, u16 did,
+ struct pasid_entry *pte)
+{
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(pte, sizeof(*pte));
+
+ /*
+ * VT-d spec 5.0 table28 states guides for cache invalidation:
+ *
+ * - PASID-selective-within-Domain PASID-cache invalidation
+ * - PASID-selective PASID-based IOTLB invalidation
+ * - If (pasid is RID_PASID)
+ * - Global Device-TLB invalidation to affected functions
+ * Else
+ * - PASID-based Device-TLB invalidation (with S=1 and
+ * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
+ */
+ pasid_cache_invalidation_with_pasid(iommu, did, pasid);
+ qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
+}
+
+/*
* Set up the scalable mode pasid table entry for first only
* translation type.
*/
+static void pasid_pte_config_first_level(struct intel_iommu *iommu,
+ struct pasid_entry *pte,
+ pgd_t *pgd, u16 did, int flags)
+{
+ lockdep_assert_held(&iommu->lock);
+
+ pasid_clear_entry(pte);
+
+ /* Setup the first level page table pointer: */
+ pasid_set_flptr(pte, (u64)__pa(pgd));
+
+ if (flags & PASID_FLAG_FL5LP)
+ pasid_set_flpm(pte, 1);
+
+ if (flags & PASID_FLAG_PAGE_SNOOP)
+ pasid_set_pgsnp(pte);
+
+ pasid_set_domain_id(pte, did);
+ pasid_set_address_width(pte, iommu->agaw);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+
+ /* Setup Present and PASID Granular Transfer Type: */
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
+ pasid_set_present(pte);
+}
+
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd,
u32 pasid, u16 did, int flags)
@@ -319,54 +402,82 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
return -EBUSY;
}
- pasid_clear_entry(pte);
+ pasid_pte_config_first_level(iommu, pte, pgd, did, flags);
- /* Setup the first level page table pointer: */
- pasid_set_flptr(pte, (u64)__pa(pgd));
+ spin_unlock(&iommu->lock);
- if (flags & PASID_FLAG_FL5LP)
- pasid_set_flpm(pte, 1);
+ pasid_flush_caches(iommu, pte, pasid, did);
- if (flags & PASID_FLAG_PAGE_SNOOP)
- pasid_set_pgsnp(pte);
+ return 0;
+}
- pasid_set_domain_id(pte, did);
- pasid_set_address_width(pte, iommu->agaw);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- pasid_set_nxe(pte);
+int intel_pasid_replace_first_level(struct intel_iommu *iommu,
+ struct device *dev, pgd_t *pgd,
+ u32 pasid, u16 did, u16 old_did,
+ int flags)
+{
+ struct pasid_entry *pte, new_pte;
- /* Setup Present and PASID Granular Transfer Type: */
- pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
- pasid_set_present(pte);
+ if (!ecap_flts(iommu->ecap)) {
+ pr_err("No first level translation support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
+ pr_err("No 5-level paging support for first-level on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
+
+ pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags);
+
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (!pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
+ return -EINVAL;
+ }
+
+ WARN_ON(old_did != pasid_get_domain_id(pte));
+
+ *pte = new_pte;
spin_unlock(&iommu->lock);
- pasid_flush_caches(iommu, pte, pasid, did);
+ intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
+ intel_iommu_drain_pasid_prq(dev, pasid);
return 0;
}
/*
- * Skip top levels of page tables for iommu which has less agaw
- * than default. Unnecessary for PT mode.
+ * Set up the scalable mode pasid entry for second only translation type.
*/
-static int iommu_skip_agaw(struct dmar_domain *domain,
- struct intel_iommu *iommu,
- struct dma_pte **pgd)
+static void pasid_pte_config_second_level(struct intel_iommu *iommu,
+ struct pasid_entry *pte,
+ u64 pgd_val, int agaw, u16 did,
+ bool dirty_tracking)
{
- int agaw;
+ lockdep_assert_held(&iommu->lock);
- for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
- *pgd = phys_to_virt(dma_pte_addr(*pgd));
- if (!dma_pte_present(*pgd))
- return -EINVAL;
- }
+ pasid_clear_entry(pte);
+ pasid_set_domain_id(pte, did);
+ pasid_set_slptr(pte, pgd_val);
+ pasid_set_address_width(pte, agaw);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
+ pasid_set_fault_enable(pte);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (dirty_tracking)
+ pasid_set_ssade(pte);
- return agaw;
+ pasid_set_present(pte);
}
-/*
- * Set up the scalable mode pasid entry for second only translation type.
- */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid)
@@ -374,7 +485,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct pasid_entry *pte;
struct dma_pte *pgd;
u64 pgd_val;
- int agaw;
u16 did;
/*
@@ -388,15 +498,58 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
}
pgd = domain->pgd;
- agaw = iommu_skip_agaw(domain, iommu, &pgd);
- if (agaw < 0) {
- dev_err(dev, "Invalid domain page table\n");
+ pgd_val = virt_to_phys(pgd);
+ did = domain_id_iommu(domain, iommu);
+
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
+ return -EBUSY;
+ }
+
+ pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw,
+ did, domain->dirty_tracking);
+ spin_unlock(&iommu->lock);
+
+ pasid_flush_caches(iommu, pte, pasid, did);
+
+ return 0;
+}
+
+int intel_pasid_replace_second_level(struct intel_iommu *iommu,
+ struct dmar_domain *domain,
+ struct device *dev, u16 old_did,
+ u32 pasid)
+{
+ struct pasid_entry *pte, new_pte;
+ struct dma_pte *pgd;
+ u64 pgd_val;
+ u16 did;
+
+ /*
+ * If hardware advertises no support for second level
+ * translation, return directly.
+ */
+ if (!ecap_slts(iommu->ecap)) {
+ pr_err("No second level translation support on %s\n",
+ iommu->name);
return -EINVAL;
}
+ pgd = domain->pgd;
pgd_val = virt_to_phys(pgd);
did = domain_id_iommu(domain, iommu);
+ pasid_pte_config_second_level(iommu, &new_pte, pgd_val,
+ domain->agaw, did,
+ domain->dirty_tracking);
+
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
if (!pte) {
@@ -404,25 +557,18 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -ENODEV;
}
- if (pasid_pte_is_present(pte)) {
+ if (!pasid_pte_is_present(pte)) {
spin_unlock(&iommu->lock);
- return -EBUSY;
+ return -EINVAL;
}
- pasid_clear_entry(pte);
- pasid_set_domain_id(pte, did);
- pasid_set_slptr(pte, pgd_val);
- pasid_set_address_width(pte, agaw);
- pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
- pasid_set_fault_enable(pte);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- if (domain->dirty_tracking)
- pasid_set_ssade(pte);
+ WARN_ON(old_did != pasid_get_domain_id(pte));
- pasid_set_present(pte);
+ *pte = new_pte;
spin_unlock(&iommu->lock);
- pasid_flush_caches(iommu, pte, pasid, did);
+ intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
+ intel_iommu_drain_pasid_prq(dev, pasid);
return 0;
}
@@ -491,9 +637,7 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ devtlb_invalidation_with_pasid(iommu, dev, pasid);
return 0;
}
@@ -501,6 +645,20 @@ int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
/*
* Set up the scalable mode pasid entry for passthrough translation type.
*/
+static void pasid_pte_config_pass_through(struct intel_iommu *iommu,
+ struct pasid_entry *pte, u16 did)
+{
+ lockdep_assert_held(&iommu->lock);
+
+ pasid_clear_entry(pte);
+ pasid_set_domain_id(pte, did);
+ pasid_set_address_width(pte, iommu->agaw);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
+ pasid_set_fault_enable(pte);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ pasid_set_present(pte);
+}
+
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
@@ -519,13 +677,7 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return -EBUSY;
}
- pasid_clear_entry(pte);
- pasid_set_domain_id(pte, did);
- pasid_set_address_width(pte, iommu->agaw);
- pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
- pasid_set_fault_enable(pte);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- pasid_set_present(pte);
+ pasid_pte_config_pass_through(iommu, pte, did);
spin_unlock(&iommu->lock);
pasid_flush_caches(iommu, pte, pasid, did);
@@ -533,6 +685,38 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
+int intel_pasid_replace_pass_through(struct intel_iommu *iommu,
+ struct device *dev, u16 old_did,
+ u32 pasid)
+{
+ struct pasid_entry *pte, new_pte;
+ u16 did = FLPT_DEFAULT_DID;
+
+ pasid_pte_config_pass_through(iommu, &new_pte, did);
+
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (!pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
+ return -EINVAL;
+ }
+
+ WARN_ON(old_did != pasid_get_domain_id(pte));
+
+ *pte = new_pte;
+ spin_unlock(&iommu->lock);
+
+ intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
+ intel_iommu_drain_pasid_prq(dev, pasid);
+
+ return 0;
+}
+
/*
* Set the page snoop control for a pasid entry which has been set up.
*/
@@ -553,26 +737,47 @@ void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
did = pasid_get_domain_id(pte);
spin_unlock(&iommu->lock);
- if (!ecap_coherent(iommu->ecap))
- clflush_cache_range(pte, sizeof(*pte));
+ intel_pasid_flush_present(iommu, dev, pasid, did, pte);
+}
- /*
- * VT-d spec 3.4 table23 states guides for cache invalidation:
- *
- * - PASID-selective-within-Domain PASID-cache invalidation
- * - PASID-selective PASID-based IOTLB invalidation
- * - If (pasid is RID_PASID)
- * - Global Device-TLB invalidation to affected functions
- * Else
- * - PASID-based Device-TLB invalidation (with S=1 and
- * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
- */
- pasid_cache_invalidation_with_pasid(iommu, did, pasid);
- qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+static void pasid_pte_config_nestd(struct intel_iommu *iommu,
+ struct pasid_entry *pte,
+ struct iommu_hwpt_vtd_s1 *s1_cfg,
+ struct dmar_domain *s2_domain,
+ u16 did)
+{
+ struct dma_pte *pgd = s2_domain->pgd;
+
+ lockdep_assert_held(&iommu->lock);
- /* Device IOTLB doesn't need to be flushed in caching mode. */
- if (!cap_caching_mode(iommu->cap))
- devtlb_invalidation_with_pasid(iommu, dev, pasid);
+ pasid_clear_entry(pte);
+
+ if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL)
+ pasid_set_flpm(pte, 1);
+
+ pasid_set_flptr(pte, s1_cfg->pgtbl_addr);
+
+ if (s1_cfg->flags & IOMMU_VTD_S1_SRE) {
+ pasid_set_sre(pte);
+ if (s1_cfg->flags & IOMMU_VTD_S1_WPE)
+ pasid_set_wpe(pte);
+ }
+
+ if (s1_cfg->flags & IOMMU_VTD_S1_EAFE)
+ pasid_set_eafe(pte);
+
+ if (s2_domain->force_snooping)
+ pasid_set_pgsnp(pte);
+
+ pasid_set_slptr(pte, virt_to_phys(pgd));
+ pasid_set_fault_enable(pte);
+ pasid_set_domain_id(pte, did);
+ pasid_set_address_width(pte, s2_domain->agaw);
+ pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ if (s2_domain->dirty_tracking)
+ pasid_set_ssade(pte);
+ pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
+ pasid_set_present(pte);
}
/**
@@ -590,10 +795,8 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
u32 pasid, struct dmar_domain *domain)
{
struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg;
- pgd_t *s1_gpgd = (pgd_t *)(uintptr_t)domain->s1_pgtbl;
struct dmar_domain *s2_domain = domain->s2_domain;
u16 did = domain_id_iommu(domain, iommu);
- struct dma_pte *pgd = s2_domain->pgd;
struct pasid_entry *pte;
/* Address width should match the address width supported by hardware */
@@ -636,37 +839,73 @@ int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
return -EBUSY;
}
- pasid_clear_entry(pte);
+ pasid_pte_config_nestd(iommu, pte, s1_cfg, s2_domain, did);
+ spin_unlock(&iommu->lock);
- if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL)
- pasid_set_flpm(pte, 1);
+ pasid_flush_caches(iommu, pte, pasid, did);
- pasid_set_flptr(pte, (uintptr_t)s1_gpgd);
+ return 0;
+}
- if (s1_cfg->flags & IOMMU_VTD_S1_SRE) {
- pasid_set_sre(pte);
- if (s1_cfg->flags & IOMMU_VTD_S1_WPE)
- pasid_set_wpe(pte);
+int intel_pasid_replace_nested(struct intel_iommu *iommu,
+ struct device *dev, u32 pasid,
+ u16 old_did, struct dmar_domain *domain)
+{
+ struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg;
+ struct dmar_domain *s2_domain = domain->s2_domain;
+ u16 did = domain_id_iommu(domain, iommu);
+ struct pasid_entry *pte, new_pte;
+
+ /* Address width should match the address width supported by hardware */
+ switch (s1_cfg->addr_width) {
+ case ADDR_WIDTH_4LEVEL:
+ break;
+ case ADDR_WIDTH_5LEVEL:
+ if (!cap_fl5lp_support(iommu->cap)) {
+ dev_err_ratelimited(dev,
+ "5-level paging not supported\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n",
+ s1_cfg->addr_width);
+ return -EINVAL;
}
- if (s1_cfg->flags & IOMMU_VTD_S1_EAFE)
- pasid_set_eafe(pte);
+ if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
+ pr_err_ratelimited("No supervisor request support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
- if (s2_domain->force_snooping)
- pasid_set_pgsnp(pte);
+ if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
+ pr_err_ratelimited("No extended access flag support on %s\n",
+ iommu->name);
+ return -EINVAL;
+ }
- pasid_set_slptr(pte, virt_to_phys(pgd));
- pasid_set_fault_enable(pte);
- pasid_set_domain_id(pte, did);
- pasid_set_address_width(pte, s2_domain->agaw);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- if (s2_domain->dirty_tracking)
- pasid_set_ssade(pte);
- pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
- pasid_set_present(pte);
+ pasid_pte_config_nestd(iommu, &new_pte, s1_cfg, s2_domain, did);
+
+ spin_lock(&iommu->lock);
+ pte = intel_pasid_get_entry(dev, pasid);
+ if (!pte) {
+ spin_unlock(&iommu->lock);
+ return -ENODEV;
+ }
+
+ if (!pasid_pte_is_present(pte)) {
+ spin_unlock(&iommu->lock);
+ return -EINVAL;
+ }
+
+ WARN_ON(old_did != pasid_get_domain_id(pte));
+
+ *pte = new_pte;
spin_unlock(&iommu->lock);
- pasid_flush_caches(iommu, pte, pasid, did);
+ intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
+ intel_iommu_drain_pasid_prq(dev, pasid);
return 0;
}
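
All of the intel_pasid_replace_*() helpers added by this patch follow the same shape; the condensed restatement below is an illustration only (the capability checks and the per-type pasid_pte_config_*() step are elided, and it is not meant to compile on its own):

/* Illustration only: common shape of the replace helpers above. */
static int pasid_replace_sketch(struct intel_iommu *iommu, struct device *dev,
                                u32 pasid, u16 old_did)
{
        struct pasid_entry *pte, new_pte = {};

        /* 1. pasid_pte_config_*() fills new_pte on the stack (elided). */

        /* 2. Install it; the old entry must already be present. */
        spin_lock(&iommu->lock);
        pte = intel_pasid_get_entry(dev, pasid);
        if (!pte) {
                spin_unlock(&iommu->lock);
                return -ENODEV;
        }
        if (!pasid_pte_is_present(pte)) {
                spin_unlock(&iommu->lock);
                return -EINVAL;
        }
        WARN_ON(old_did != pasid_get_domain_id(pte));
        *pte = new_pte;
        spin_unlock(&iommu->lock);

        /* 3. Flush for a previously-present entry, then drain pending page requests. */
        intel_pasid_flush_present(iommu, dev, pasid, old_did, pte);
        intel_iommu_drain_pasid_prq(dev, pasid);

        return 0;
}
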
@@ -681,6 +920,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ u16 did;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, false);
@@ -689,28 +929,11 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
return;
}
+ did = context_domain_id(context);
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
-
- /*
- * Cache invalidation for changes to a scalable-mode context table
- * entry.
- *
- * Section 6.5.3.3 of the VT-d spec:
- * - Device-selective context-cache invalidation;
- * - Domain-selective PASID-cache invalidation to affected domains
- * (can be skipped if all PASID entries were not-present);
- * - Domain-selective IOTLB invalidation to affected domains;
- * - Global Device-TLB invalidation to affected functions.
- *
- * The iommu has been parked in the blocking state. All domains have
- * been detached from the device or PASID. The PASID and IOTLB caches
- * have been invalidated during the domain detach path.
- */
- iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
- DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
- devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+ intel_context_flush_no_pasid(info, context, did);
}
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -768,10 +991,10 @@ static int context_entry_set_pasid_table(struct context_entry *context,
if (info->ats_supported)
context_set_sm_dte(context);
- if (info->pri_supported)
- context_set_sm_pre(context);
if (info->pasid_supported)
context_set_pasid(context);
+ if (info->pri_supported)
+ context_set_sm_pre(context);
context_set_fault_enable(context);
context_set_present(context);
@@ -872,3 +1095,61 @@ int intel_pasid_setup_sm_context(struct device *dev)
return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
}
+
+/*
+ * Global Device-TLB invalidation following changes in a context entry which
+ * was present.
+ */
+static void __context_flush_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->ats_enabled)
+ return;
+
+ qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+ /*
+ * There is no guarantee that the device DMA is stopped when it reaches
+ * here. Therefore, always attempt the extra device TLB invalidation
+ * quirk. The impact on performance is acceptable since this is not a
+ * performance-critical path.
+ */
+ quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
+ info->ats_qdep);
+}
+
+/*
+ * Cache invalidations after change in a context table entry that was present
+ * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
+ * This helper can only be used when IOMMU is working in the legacy mode or
+ * IOMMU is in scalable mode but all PASID table entries of the device are
+ * non-present.
+ */
+void intel_context_flush_no_pasid(struct device_domain_info *info,
+ struct context_entry *context, u16 did)
+{
+ struct intel_iommu *iommu = info->iommu;
+
+ /*
+ * Device-selective context-cache invalidation. The Domain-ID field
+ * of the Context-cache Invalidate Descriptor is ignored by hardware
+ * when operating in scalable mode. Therefore the @did value doesn't
+ * matter in scalable mode.
+ */
+ iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
+ DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+
+ /*
+ * For legacy mode:
+ * - Domain-selective IOTLB invalidation
+ * - Global Device-TLB invalidation to all affected functions
+ */
+ if (!sm_supported(iommu)) {
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+ __context_flush_dev_iotlb(info);
+
+ return;
+ }
+
+ __context_flush_dev_iotlb(info);
+}