Diffstat (limited to 'drivers/iommu/amd/iommu.c')
-rw-r--r-- | drivers/iommu/amd/iommu.c | 1751
1 file changed, 966 insertions, 785 deletions
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 4283dd8191f0..cd5116d8c3b2 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -18,6 +18,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-map-ops.h>
 #include <linux/dma-direct.h>
+#include <linux/idr.h>
 #include <linux/iommu-helper.h>
 #include <linux/delay.h>
 #include <linux/amd-iommu.h>
@@ -42,23 +43,16 @@
 #include "amd_iommu.h"
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
+#include "../iommu-pages.h"

 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))

-/* IO virtual address start page frame number */
-#define IOVA_START_PFN		(1)
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-
 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
 #define MSI_RANGE_END		(0xfeefffff)
 #define HT_RANGE_START		(0xfd00000000ULL)
 #define HT_RANGE_END		(0xffffffffffULL)

-#define DEFAULT_PGTABLE_LEVEL	PAGE_MODE_3_LEVEL
-
-static DEFINE_SPINLOCK(pd_bitmap_lock);
-
 LIST_HEAD(ioapic_map);
 LIST_HEAD(hpet_map);
 LIST_HEAD(acpihid_map);
@@ -75,9 +69,23 @@ struct iommu_cmd {
	u32 data[4];
 };

+/*
+ * AMD IOMMU allows up to 2^16 different protection domains. Their IDs are
+ * tracked in this IDA so we know which ones are already in use.
+ */
+DEFINE_IDA(pdom_ids);
+
 struct kmem_cache *amd_iommu_irq_cache;

-static void detach_device(struct device *dev);
+static int amd_iommu_attach_device(struct iommu_domain *dom,
+				   struct device *dev);
+
+static void set_dte_entry(struct amd_iommu *iommu,
+			  struct iommu_dev_data *dev_data);
+
+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
+
+static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);

 /****************************************************************************
 *
@@ -85,9 +93,150 @@ static void detach_device(struct device *dev);
 *
 ****************************************************************************/

+static __always_inline void amd_iommu_atomic128_set(__int128 *ptr, __int128 val)
+{
+	/*
+	 * Note:
+	 * We use arch_cmpxchg128_local() because:
+	 * - Need cmpxchg16b instruction mainly for 128-bit store to DTE
+	 *   (not necessary for cmpxchg since this function is already
+	 *   protected by a spin_lock for this DTE).
+	 * - Neither need LOCK_PREFIX nor try loop because of the spin_lock.
+	 */
+	arch_cmpxchg128_local(ptr, *ptr, val);
+}
+
+static void write_dte_upper128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+	struct dev_table_entry old;
+
+	old.data128[1] = ptr->data128[1];
+	/*
+	 * Preserve DTE_DATA2_INTR_MASK. This needs to be
+	 * done here since it requires to be inside
+	 * spin_lock(&dev_data->dte_lock) context.
+	 */
+	new->data[2] &= ~DTE_DATA2_INTR_MASK;
+	new->data[2] |= old.data[2] & DTE_DATA2_INTR_MASK;
+
+	amd_iommu_atomic128_set(&ptr->data128[1], new->data128[1]);
+}
+
+static void write_dte_lower128(struct dev_table_entry *ptr, struct dev_table_entry *new)
+{
+	amd_iommu_atomic128_set(&ptr->data128[0], new->data128[0]);
+}
+
+/*
+ * Note:
+ * IOMMU reads the entire Device Table entry in a single 256-bit transaction
+ * but the driver is programming DTE using 2 128-bit cmpxchg. So, the driver
+ * needs to ensure the following:
+ *   - DTE[V|GV] bit is being written last when setting.
+ *   - DTE[V|GV] bit is being written first when clearing.
+ *
+ * This function is used only by code which updates the DMA translation part
+ * of the DTE. So, only consider control bits related to DMA when updating
+ * the entry.
+ */
+static void update_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+			  struct dev_table_entry *new)
+{
+	unsigned long flags;
+	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	struct dev_table_entry *ptr = &dev_table[dev_data->devid];
+
+	spin_lock_irqsave(&dev_data->dte_lock, flags);
+
+	if (!(ptr->data[0] & DTE_FLAG_V)) {
+		/* Existing DTE is not valid. */
+		write_dte_upper128(ptr, new);
+		write_dte_lower128(ptr, new);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+	} else if (!(new->data[0] & DTE_FLAG_V)) {
+		/* Existing DTE is valid. New DTE is not valid. */
+		write_dte_lower128(ptr, new);
+		write_dte_upper128(ptr, new);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+	} else if (!FIELD_GET(DTE_FLAG_GV, ptr->data[0])) {
+		/*
+		 * Both DTEs are valid.
+		 * Existing DTE has no guest page table.
+		 */
+		write_dte_upper128(ptr, new);
+		write_dte_lower128(ptr, new);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+	} else if (!FIELD_GET(DTE_FLAG_GV, new->data[0])) {
+		/*
+		 * Both DTEs are valid.
+		 * Existing DTE has guest page table,
+		 * new DTE has no guest page table.
+		 */
+		write_dte_lower128(ptr, new);
+		write_dte_upper128(ptr, new);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+	} else if (FIELD_GET(DTE_GPT_LEVEL_MASK, ptr->data[2]) !=
+		   FIELD_GET(DTE_GPT_LEVEL_MASK, new->data[2])) {
+		/*
+		 * Both DTEs are valid and have guest page table,
+		 * but have different number of levels. So, we need
+		 * to update both upper and lower 128-bit value, which
+		 * requires disabling and flushing.
+		 */
+		struct dev_table_entry clear = {};
+
+		/* First disable DTE */
+		write_dte_lower128(ptr, &clear);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+
+		/* Then update DTE */
+		write_dte_upper128(ptr, new);
+		write_dte_lower128(ptr, new);
+		iommu_flush_dte_sync(iommu, dev_data->devid);
+	} else {
+		/*
+		 * Both DTEs are valid and have guest page table,
+		 * and same number of levels. We just need to only
+		 * update the lower 128-bit. So no need to disable DTE.
+		 */
+		write_dte_lower128(ptr, new);
+	}
+
+	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
+static void get_dte256(struct amd_iommu *iommu, struct iommu_dev_data *dev_data,
+		       struct dev_table_entry *dte)
+{
+	unsigned long flags;
+	struct dev_table_entry *ptr;
+	struct dev_table_entry *dev_table = get_dev_table(iommu);
+
+	ptr = &dev_table[dev_data->devid];
+
+	spin_lock_irqsave(&dev_data->dte_lock, flags);
+	dte->data128[0] = ptr->data128[0];
+	dte->data128[1] = ptr->data128[1];
+	spin_unlock_irqrestore(&dev_data->dte_lock, flags);
+}
+
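The split write has one simple invariant: the IOMMU may fetch the whole 256-bit entry at any instant, so the half carrying the V/GV bits must become visible last when setting and first when clearing. A compilable userspace sketch of just that ordering, with a pthread mutex standing in for dte_lock and plain 128-bit stores standing in for arch_cmpxchg128_local() (all names here are illustrative, not the driver's):

#include <pthread.h>

/* Illustrative 256-bit entry; data128[0] carries the valid bit, like a DTE. */
struct entry256 {
	unsigned __int128 data128[2];
};

#define ENTRY_V ((unsigned __int128)1)

static pthread_mutex_t entry_lock = PTHREAD_MUTEX_INITIALIZER;

/* Only the two simple transitions; update_dte256() above also handles GV. */
static void entry_update(struct entry256 *e, const struct entry256 *new)
{
	pthread_mutex_lock(&entry_lock);
	if (!(e->data128[0] & ENTRY_V)) {
		/* invalid -> valid: write the V-carrying half last */
		e->data128[1] = new->data128[1];
		e->data128[0] = new->data128[0];
	} else if (!(new->data128[0] & ENTRY_V)) {
		/* valid -> invalid: take the V-carrying half out first */
		e->data128[0] = new->data128[0];
		e->data128[1] = new->data128[1];
	}
	pthread_mutex_unlock(&entry_lock);
}
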
 static inline bool pdom_is_v2_pgtbl_mode(struct protection_domain *pdom)
 {
-	return (pdom && (pdom->flags & PD_IOMMUV2_MASK));
+	return (pdom && (pdom->pd_mode == PD_MODE_V2));
+}
+
+static inline bool pdom_is_in_pt_mode(struct protection_domain *pdom)
+{
+	return (pdom->domain.type == IOMMU_DOMAIN_IDENTITY);
+}
+
+/*
+ * We cannot support PASID w/ existing v1 page table in the same domain
+ * since it will be nested. However, existing domain w/ v2 page table
+ * or passthrough mode can be used for PASID.
+ */
+static inline bool pdom_is_sva_capable(struct protection_domain *pdom)
+{
+	return pdom_is_v2_pgtbl_mode(pdom) || pdom_is_in_pt_mode(pdom);
 }

 static inline int get_acpihid_device_id(struct device *dev,
@@ -180,11 +329,6 @@ static struct amd_iommu *rlookup_amd_iommu(struct device *dev)
	return __rlookup_amd_iommu(seg, PCI_SBDF_TO_DEVID(devid));
 }

-static struct protection_domain *to_pdomain(struct iommu_domain *dom)
-{
-	return container_of(dom, struct protection_domain, domain);
-}
-
 static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
 {
	struct iommu_dev_data *dev_data;
@@ -194,7 +338,8 @@ static struct iommu_dev_data *alloc_dev_data(struct amd_iommu *iommu, u16 devid)
	if (!dev_data)
		return NULL;

-	spin_lock_init(&dev_data->lock);
+	mutex_init(&dev_data->mutex);
+	spin_lock_init(&dev_data->dte_lock);
	dev_data->devid = devid;
	ratelimit_default_init(&dev_data->rs);

@@ -202,7 +347,7 @@
	return dev_data;
 }

-static struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
+struct iommu_dev_data *search_dev_data(struct amd_iommu *iommu, u16 devid)
 {
	struct iommu_dev_data *dev_data;
	struct llist_node *node;
@@ -222,9 +367,11 @@
 static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
 {
+	struct dev_table_entry new;
	struct amd_iommu *iommu;
-	struct dev_table_entry *dev_table;
+	struct iommu_dev_data *dev_data, *alias_data;
	u16 devid = pci_dev_id(pdev);
+	int ret = 0;

	if (devid == alias)
		return 0;
@@ -233,13 +380,27 @@ static int clone_alias(struct pci_dev *pdev, u16 alias, void *data)
	if (!iommu)
		return 0;

-	amd_iommu_set_rlookup_table(iommu, alias);
-	dev_table = get_dev_table(iommu);
-	memcpy(dev_table[alias].data,
-	       dev_table[devid].data,
-	       sizeof(dev_table[alias].data));
+	/* Copy the data from pdev */
+	dev_data = dev_iommu_priv_get(&pdev->dev);
+	if (!dev_data) {
+		pr_err("%s : Failed to get dev_data for 0x%x\n", __func__, devid);
+		ret = -EINVAL;
+		goto out;
+	}
+	get_dte256(iommu, dev_data, &new);

-	return 0;
+	/* Setup alias */
+	alias_data = find_dev_data(iommu, alias);
+	if (!alias_data) {
+		pr_err("%s : Failed to get alias dev_data for 0x%x\n", __func__, alias);
+		ret = -EINVAL;
+		goto out;
+	}
+	update_dte256(iommu, alias_data, &new);
+
+	amd_iommu_set_rlookup_table(iommu, alias);
+out:
+	return ret;
 }

 static void clone_aliases(struct amd_iommu *iommu, struct device *dev)
@@ -385,7 +546,7 @@ static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
	}
 }

-int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+static inline int pdev_enable_cap_pri(struct pci_dev *pdev)
 {
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
	int ret = -EINVAL;
@@ -393,6 +554,9 @@ int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
	if (dev_data->pri_enabled)
		return 0;

+	if (!dev_data->ats_enabled)
+		return 0;
+
	if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
		/*
		 * First reset the PRI state of the device.
@@ -409,7 +573,7 @@
	return ret;
 }

-void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+static inline void pdev_disable_cap_pri(struct pci_dev *pdev)
 {
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
@@ -451,15 +615,14 @@ static void pdev_enable_caps(struct pci_dev *pdev)
 {
	pdev_enable_cap_ats(pdev);
	pdev_enable_cap_pasid(pdev);
-	amd_iommu_pdev_enable_cap_pri(pdev);
-
+	pdev_enable_cap_pri(pdev);
 }

 static void pdev_disable_caps(struct pci_dev *pdev)
 {
	pdev_disable_cap_ats(pdev);
	pdev_disable_cap_pasid(pdev);
-	amd_iommu_pdev_disable_cap_pri(pdev);
+	pdev_disable_cap_pri(pdev);
 }

 /*
@@ -510,6 +673,12 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
		return -ENOMEM;

	dev_data->dev = dev;
+
+	/*
+	 * dev_iommu_priv_set() needs to be called before setup_aliases.
+	 * Otherwise, a subsequent call to dev_iommu_priv_get() will fail.
+	 */
+	dev_iommu_priv_set(dev, dev_data);
	setup_aliases(iommu, dev);

	/*
@@ -523,8 +692,6 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
		dev_data->flags = pdev_get_caps(to_pci_dev(dev));
	}

-	dev_iommu_priv_set(dev, dev_data);
-
	return 0;
 }

@@ -545,22 +712,6 @@ static void iommu_ignore_device(struct amd_iommu *iommu, struct device *dev)
	setup_aliases(iommu, dev);
 }

-static void amd_iommu_uninit_device(struct device *dev)
-{
-	struct iommu_dev_data *dev_data;
-
-	dev_data = dev_iommu_priv_get(dev);
-	if (!dev_data)
-		return;
-
-	if (dev_data->domain)
-		detach_device(dev);
-
-	/*
-	 * We keep dev_data around for unplugged devices and reuse it when the
-	 * device is re-plugged - not doing so would introduce a ton of races.
-	 */
-}

 /****************************************************************************
 *
@@ -571,10 +722,13 @@ static void amd_iommu_uninit_device(struct device *dev)
 static void dump_dte_entry(struct amd_iommu *iommu, u16 devid)
 {
	int i;
-	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	struct dev_table_entry dte;
+	struct iommu_dev_data *dev_data = find_dev_data(iommu, devid);
+
+	get_dte256(iommu, dev_data, &dte);

	for (i = 0; i < 4; ++i)
-		pr_err("DTE[%d]: %016llx\n", i, dev_table[devid].data[i]);
+		pr_err("DTE[%d]: %016llx\n", i, dte.data[i]);
 }

 static void dump_command(unsigned long phys_addr)
@@ -813,63 +967,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)

	while (head != tail) {
		iommu_print_event(iommu, iommu->evt_buf + head);
-		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
-	}
-
-	writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
-}
-
-static void iommu_poll_ppr_log(struct amd_iommu *iommu)
-{
-	u32 head, tail;
-
-	if (iommu->ppr_log == NULL)
-		return;
-
-	head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-	tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
-
-	while (head != tail) {
-		volatile u64 *raw;
-		u64 entry[2];
-		int i;
-
-		raw = (u64 *)(iommu->ppr_log + head);
-
-		/*
-		 * Hardware bug: Interrupt may arrive before the entry is
-		 * written to memory. If this happens we need to wait for the
-		 * entry to arrive.
-		 */
-		for (i = 0; i < LOOP_TIMEOUT; ++i) {
-			if (PPR_REQ_TYPE(raw[0]) != 0)
-				break;
-			udelay(1);
-		}
-
-		/* Avoid memcpy function-call overhead */
-		entry[0] = raw[0];
-		entry[1] = raw[1];
-
-		/*
-		 * To detect the hardware errata 733 we need to clear the
-		 * entry back to zero. This issue does not exist on SNP
-		 * enabled system. Also this buffer is not writeable on
-		 * SNP enabled system.
-		 */
-		if (!amd_iommu_snp_en)
-			raw[0] = raw[1] = 0UL;

		/* Update head pointer of hardware ring-buffer */
-		head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
-		writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-
-		/* TODO: PPR Handler will be added when we add IOPF support */
-
-		/* Refresh ring-buffer information */
-		head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
-		tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+		head = (head + EVENT_ENTRY_SIZE) % EVT_BUFFER_SIZE;
+		writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
	}
+
 }

 #ifdef CONFIG_IRQ_REMAP
@@ -992,7 +1095,7 @@ irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
 {
	amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
			     MMIO_STATUS_PPR_OVERFLOW_MASK,
-			     iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+			     amd_iommu_poll_ppr_log, amd_iommu_restart_ppr_log);

	return IRQ_HANDLED;
 }
@@ -1271,7 +1374,7 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
	if (!iommu->need_sync)
		return 0;

-	data = atomic64_add_return(1, &iommu->cmd_sem_val);
+	data = atomic64_inc_return(&iommu->cmd_sem_val);
	build_completion_wait(&cmd, iommu, data);

	raw_spin_lock_irqsave(&iommu->lock, flags);
@@ -1288,6 +1391,21 @@ out_unlock:
	return ret;
 }

+static void domain_flush_complete(struct protection_domain *domain)
+{
+	struct pdom_iommu_info *pdom_iommu_info;
+	unsigned long i;
+
+	lockdep_assert_held(&domain->lock);
+
+	/*
+	 * Devices of this domain are behind this IOMMU
+	 * We need to wait for completion of all commands.
+	 */
+	xa_for_each(&domain->iommu_array, i, pdom_iommu_info)
+		iommu_completion_wait(pdom_iommu_info->iommu);
+}
+
 static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid)
 {
	struct iommu_cmd cmd;
@@ -1297,6 +1415,15 @@
	return iommu_queue_command(iommu, &cmd);
 }

+static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid)
+{
+	int ret;
+
+	ret = iommu_flush_dte(iommu, devid);
+	if (!ret)
+		iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_dte_all(struct amd_iommu *iommu)
 {
	u32 devid;
@@ -1388,14 +1515,9 @@ void amd_iommu_flush_all_caches(struct amd_iommu *iommu)
 static int device_flush_iotlb(struct iommu_dev_data *dev_data, u64 address,
			      size_t size, ioasid_t pasid, bool gn)
 {
-	struct amd_iommu *iommu;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct iommu_cmd cmd;
-	int qdep;
-
-	qdep = dev_data->ats_qdep;
-	iommu = rlookup_amd_iommu(dev_data->dev);
-	if (!iommu)
-		return -EINVAL;
+	int qdep = dev_data->ats_qdep;

	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size, pasid, gn);
@@ -1415,16 +1537,12 @@ static int device_flush_dte_alias(struct pci_dev *pdev, u16 alias, void *data)
 */
 static int device_flush_dte(struct iommu_dev_data *dev_data)
 {
-	struct amd_iommu *iommu;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
	struct pci_dev *pdev = NULL;
	struct amd_iommu_pci_seg *pci_seg;
	u16 alias;
	int ret;

-	iommu = rlookup_amd_iommu(dev_data->dev);
-	if (!iommu)
-		return -EINVAL;
-
	if (dev_is_pci(dev_data->dev))
		pdev = to_pci_dev(dev_data->dev);
@@ -1453,34 +1571,70 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
	return ret;
 }

-/*
- * TLB invalidation function which is called from the mapping functions.
- * It invalidates a single PTE if the range to flush is within a single
- * page. Otherwise it flushes the whole TLB of the IOMMU.
- */
-static void __domain_flush_pages(struct protection_domain *domain,
+static int domain_flush_pages_v2(struct protection_domain *pdom,
				 u64 address, size_t size)
 {
	struct iommu_dev_data *dev_data;
	struct iommu_cmd cmd;
-	int ret = 0, i;
-	ioasid_t pasid = IOMMU_NO_PASID;
-	bool gn = false;
+	int ret = 0;

-	if (pdom_is_v2_pgtbl_mode(domain))
-		gn = true;
+	lockdep_assert_held(&pdom->lock);
+
+	list_for_each_entry(dev_data, &pdom->dev_list, list) {
+		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
+		u16 domid = dev_data->gcr3_info.domid;

-	build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, gn);
+		build_inv_iommu_pages(&cmd, address, size,
+				      domid, IOMMU_NO_PASID, true);

-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (!domain->dev_iommu[i])
-			continue;
+		ret |= iommu_queue_command(iommu, &cmd);
+	}
+
+	return ret;
+}
+
+static int domain_flush_pages_v1(struct protection_domain *pdom,
+				 u64 address, size_t size)
+{
+	struct pdom_iommu_info *pdom_iommu_info;
+	struct iommu_cmd cmd;
+	int ret = 0;
+	unsigned long i;
+
+	lockdep_assert_held(&pdom->lock);
+
+	build_inv_iommu_pages(&cmd, address, size,
+			      pdom->id, IOMMU_NO_PASID, false);

+	xa_for_each(&pdom->iommu_array, i, pdom_iommu_info) {
		/*
		 * Devices of this domain are behind this IOMMU
		 * We need a TLB flush
		 */
-		ret |= iommu_queue_command(amd_iommus[i], &cmd);
+		ret |= iommu_queue_command(pdom_iommu_info->iommu, &cmd);
+	}
+
+	return ret;
+}
+
+/*
+ * TLB invalidation function which is called from the mapping functions.
+ * It flushes range of PTEs of the domain.
+ */
+static void __domain_flush_pages(struct protection_domain *domain,
+				 u64 address, size_t size)
+{
+	struct iommu_dev_data *dev_data;
+	int ret = 0;
+	ioasid_t pasid = IOMMU_NO_PASID;
+	bool gn = false;
+
+	lockdep_assert_held(&domain->lock);
+
+	if (pdom_is_v2_pgtbl_mode(domain)) {
+		gn = true;
+		ret = domain_flush_pages_v2(domain, address, size);
+	} else {
+		ret = domain_flush_pages_v1(domain, address, size);
	}

	list_for_each_entry(dev_data, &domain->dev_list, list) {
@@ -1497,11 +1651,13 @@ static void __domain_flush_pages(struct protection_domain *domain,
 void amd_iommu_domain_flush_pages(struct protection_domain *domain,
				  u64 address, size_t size)
 {
+	lockdep_assert_held(&domain->lock);
+
	if (likely(!amd_iommu_np_cache)) {
		__domain_flush_pages(domain, address, size);

		/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-		amd_iommu_domain_flush_complete(domain);
+		domain_flush_complete(domain);

		return;
	}
@@ -1541,7 +1697,7 @@ void amd_iommu_domain_flush_pages(struct protection_domain *domain,
	}

	/* Wait until IOMMU TLB and all device IOTLB flushes are complete */
-	amd_iommu_domain_flush_complete(domain);
+	domain_flush_complete(domain);
 }
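The v1 path sends one INVALIDATE_IOMMU_PAGES per IOMMU that has at least one device in the domain; the iommu_array xarray is what remembers that set. A compilable analogue of the bookkeeping (a fixed slot array plays the xarray, printf plays iommu_queue_command(); all names are illustrative):

#include <stdio.h>

#define MAX_IOMMUS 8

struct pdom_iommu_info {
	unsigned long refcnt;	/* devices of this domain behind this IOMMU */
};

/* Slot i is non-NULL iff at least one device behind IOMMU i is attached. */
static struct pdom_iommu_info *iommu_array[MAX_IOMMUS];

static void domain_flush_pages_v1_analogue(void)
{
	/* Analogue of xa_for_each(): visit only occupied slots, once each. */
	for (int i = 0; i < MAX_IOMMUS; i++) {
		if (iommu_array[i])
			printf("queue INVALIDATE_IOMMU_PAGES on iommu %d\n", i);
	}
}

int main(void)
{
	static struct pdom_iommu_info info = { .refcnt = 1 };

	iommu_array[2] = &info;
	domain_flush_pages_v1_analogue();
	return 0;
}
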
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
@@ -1551,20 +1707,27 @@ static void amd_iommu_domain_flush_all(struct protection_domain *domain)
			    CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }

-void amd_iommu_domain_flush_complete(struct protection_domain *domain)
+void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
+				     ioasid_t pasid, u64 address, size_t size)
 {
-	int i;
+	struct iommu_cmd cmd;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (domain && !domain->dev_iommu[i])
-			continue;
+	build_inv_iommu_pages(&cmd, address, size,
+			      dev_data->gcr3_info.domid, pasid, true);
+	iommu_queue_command(iommu, &cmd);

-		/*
-		 * Devices of this domain are behind this IOMMU
-		 * We need to wait for completion of all commands.
-		 */
-		iommu_completion_wait(amd_iommus[i]);
-	}
+	if (dev_data->ats_enabled)
+		device_flush_iotlb(dev_data, address, size, pasid, true);
+
+	iommu_completion_wait(iommu);
+}
+
+static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
+				ioasid_t pasid)
+{
+	amd_iommu_dev_flush_pasid_pages(dev_data, pasid, 0,
+					CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
 }

 /* Flush the not present cache if it exists */
@@ -1584,12 +1747,38 @@ static void domain_flush_np_cache(struct protection_domain *domain,
 /*
 * This function flushes the DTEs for all devices in domain
 */
-static void domain_flush_devices(struct protection_domain *domain)
+void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
 {
	struct iommu_dev_data *dev_data;

+	lockdep_assert_held(&domain->lock);
+
+	list_for_each_entry(dev_data, &domain->dev_list, list) {
+		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+		set_dte_entry(iommu, dev_data);
+		clone_aliases(iommu, dev_data->dev);
+	}
+
	list_for_each_entry(dev_data, &domain->dev_list, list)
		device_flush_dte(dev_data);
+
+	domain_flush_complete(domain);
+}
+
+int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
+{
+	struct iommu_dev_data *dev_data;
+	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
+
+	dev_data = dev_iommu_priv_get(dev);
+	iommu = get_amd_iommu_from_dev(dev);
+
+	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+			   tag, dev_data->pri_tlp);
+
+	return iommu_queue_command(iommu, &cmd);
 }

 /****************************************************************************
 *
@@ -1602,28 +1791,14 @@ static void domain_flush_devices(struct protection_domain *domain)
 *
 ****************************************************************************/

-static u16 domain_id_alloc(void)
+static int pdom_id_alloc(void)
 {
-	int id;
-
-	spin_lock(&pd_bitmap_lock);
-	id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
-	BUG_ON(id == 0);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__set_bit(id, amd_iommu_pd_alloc_bitmap);
-	else
-		id = 0;
-	spin_unlock(&pd_bitmap_lock);
-
-	return id;
+	return ida_alloc_range(&pdom_ids, 1, MAX_DOMAIN_ID - 1, GFP_ATOMIC);
 }

-static void domain_id_free(int id)
+static void pdom_id_free(int id)
 {
-	spin_lock(&pd_bitmap_lock);
-	if (id > 0 && id < MAX_DOMAIN_ID)
-		__clear_bit(id, amd_iommu_pd_alloc_bitmap);
-	spin_unlock(&pd_bitmap_lock);
+	ida_free(&pdom_ids, id);
 }
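ida_alloc_range() hands back the smallest free integer in the requested range (or a negative errno) and tracks it internally, so the old bitmap, its dedicated spinlock, and the BUG_ON all collapse into the two one-liners above. The same pattern in isolation, against a hypothetical 16-bit ID space:

#include <linux/idr.h>

static DEFINE_IDA(example_ids);

/* ID 0 stays reserved, exactly as pdom_id_alloc() reserves domain 0. */
static int example_id_alloc(void)
{
	return ida_alloc_range(&example_ids, 1, 0xFFFF - 1, GFP_KERNEL);
}

static void example_id_free(int id)
{
	ida_free(&example_ids, id);
}
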
 static void free_gcr3_tbl_level1(u64 *tbl)
@@ -1637,7 +1812,7 @@

		ptr = iommu_phys_to_virt(tbl[i] & PAGE_MASK);

-		free_page((unsigned long)ptr);
+		iommu_free_page(ptr);
	}
 }

@@ -1656,16 +1831,22 @@ static void free_gcr3_tbl_level2(u64 *tbl)
	}
 }

-static void free_gcr3_table(struct protection_domain *domain)
+static void free_gcr3_table(struct gcr3_tbl_info *gcr3_info)
 {
-	if (domain->glx == 2)
-		free_gcr3_tbl_level2(domain->gcr3_tbl);
-	else if (domain->glx == 1)
-		free_gcr3_tbl_level1(domain->gcr3_tbl);
+	if (gcr3_info->glx == 2)
+		free_gcr3_tbl_level2(gcr3_info->gcr3_tbl);
+	else if (gcr3_info->glx == 1)
+		free_gcr3_tbl_level1(gcr3_info->gcr3_tbl);
	else
-		BUG_ON(domain->glx != 0);
+		WARN_ON_ONCE(gcr3_info->glx != 0);

-	free_page((unsigned long)domain->gcr3_tbl);
+	gcr3_info->glx = 0;
+
+	/* Free per device domain ID */
+	pdom_id_free(gcr3_info->domid);
+
+	iommu_free_page(gcr3_info->gcr3_tbl);
+	gcr3_info->gcr3_tbl = NULL;
 }

 /*
@@ -1684,100 +1865,224 @@ static int get_gcr3_levels(int pasids)
	return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
 }

-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+static int setup_gcr3_table(struct gcr3_tbl_info *gcr3_info,
+			    struct amd_iommu *iommu, int pasids)
 {
	int levels = get_gcr3_levels(pasids);
+	int nid = iommu ? dev_to_node(&iommu->dev->dev) : NUMA_NO_NODE;
+	int domid;

	if (levels > amd_iommu_max_glx_val)
		return -EINVAL;

-	domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
-	if (domain->gcr3_tbl == NULL)
+	if (gcr3_info->gcr3_tbl)
+		return -EBUSY;
+
+	/* Allocate per device domain ID */
+	domid = pdom_id_alloc();
+	if (domid <= 0)
+		return -ENOSPC;
+	gcr3_info->domid = domid;
+
+	gcr3_info->gcr3_tbl = iommu_alloc_page_node(nid, GFP_ATOMIC);
+	if (gcr3_info->gcr3_tbl == NULL) {
+		pdom_id_free(domid);
		return -ENOMEM;
+	}
+
+	gcr3_info->glx = levels;
+
+	return 0;
+}
+
+static u64 *__get_gcr3_pte(struct gcr3_tbl_info *gcr3_info,
+			   ioasid_t pasid, bool alloc)
+{
+	int index;
+	u64 *pte;
+	u64 *root = gcr3_info->gcr3_tbl;
+	int level = gcr3_info->glx;
+
+	while (true) {
+
+		index = (pasid >> (9 * level)) & 0x1ff;
+		pte = &root[index];

-	domain->glx = levels;
-	domain->flags |= PD_IOMMUV2_MASK;
+		if (level == 0)
+			break;
+
+		if (!(*pte & GCR3_VALID)) {
+			if (!alloc)
+				return NULL;
+
+			root = (void *)get_zeroed_page(GFP_ATOMIC);
+			if (root == NULL)
+				return NULL;
+
+			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
+		}

-	amd_iommu_domain_update(domain);
+		root = iommu_phys_to_virt(*pte & PAGE_MASK);
+
+		level -= 1;
+	}
+
+	return pte;
+}
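The GCR3 table is walked like a radix tree with 512 entries per page: each level consumes 9 bits of the PASID, which is why get_gcr3_levels() divides the PASID width by 9 (rounding up, minus one), and glx = 0/1/2 covers 9/18/27 bits. The index arithmetic from __get_gcr3_pte(), runnable on its own:

#include <stdio.h>

/* Index into a 512-entry table at the given level, as in __get_gcr3_pte(). */
static unsigned int gcr3_index(unsigned int pasid, int level)
{
	return (pasid >> (9 * level)) & 0x1ff;
}

int main(void)
{
	unsigned int pasid = 0x12345;	/* example 17-bit PASID, needs glx = 1 */

	printf("level 1 index: 0x%x\n", gcr3_index(pasid, 1));	/* 0x91 */
	printf("level 0 index: 0x%x\n", gcr3_index(pasid, 0));	/* 0x145 */
	return 0;
}
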
+
+static int update_gcr3(struct iommu_dev_data *dev_data,
+		       ioasid_t pasid, unsigned long gcr3, bool set)
+{
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+	u64 *pte;
+
+	pte = __get_gcr3_pte(gcr3_info, pasid, true);
+	if (pte == NULL)
+		return -ENOMEM;
+
+	if (set)
+		*pte = (gcr3 & PAGE_MASK) | GCR3_VALID;
+	else
+		*pte = 0;
+
+	dev_flush_pasid_all(dev_data, pasid);

	return 0;
 }

-static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
-			  struct protection_domain *domain, bool ats, bool ppr)
+int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid,
+		       unsigned long gcr3)
 {
-	u64 pte_root = 0;
-	u64 flags = 0;
-	u32 old_domid;
-	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+	int ret;

-	if (domain->iop.mode != PAGE_MODE_NONE)
-		pte_root = iommu_virt_to_phys(domain->iop.root);
+	iommu_group_mutex_assert(dev_data->dev);

-	pte_root |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
-		    << DEV_ENTRY_MODE_SHIFT;
+	ret = update_gcr3(dev_data, pasid, gcr3, true);
+	if (ret)
+		return ret;

-	pte_root |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+	gcr3_info->pasid_cnt++;
+	return ret;
+}

-	/*
-	 * When SNP is enabled, Only set TV bit when IOMMU
-	 * page translation is in use.
-	 */
-	if (!amd_iommu_snp_en || (domain->id != 0))
-		pte_root |= DTE_FLAG_TV;
+int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid)
+{
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+	int ret;
+
+	iommu_group_mutex_assert(dev_data->dev);

-	flags = dev_table[devid].data[1];
+	ret = update_gcr3(dev_data, pasid, 0, false);
+	if (ret)
+		return ret;

-	if (ats)
-		flags |= DTE_FLAG_IOTLB;
+	gcr3_info->pasid_cnt--;
+	return ret;
+}

-	if (ppr)
-		pte_root |= 1ULL << DEV_ENTRY_PPR;
+static void make_clear_dte(struct iommu_dev_data *dev_data, struct dev_table_entry *ptr,
+			   struct dev_table_entry *new)
+{
+	/* All existing DTE must have V bit set */
+	new->data128[0] = DTE_FLAG_V;
+	new->data128[1] = 0;
+}

-	if (domain->dirty_tracking)
-		pte_root |= DTE_FLAG_HAD;
+/*
+ * Note:
+ * The old values for the GCR3 table and GPT have been cleared by the caller.
+ */
+static void set_dte_gcr3_table(struct amd_iommu *iommu,
+			       struct iommu_dev_data *dev_data,
+			       struct dev_table_entry *target)
+{
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+	u64 gcr3;

-	if (domain->flags & PD_IOMMUV2_MASK) {
-		u64 gcr3 = iommu_virt_to_phys(domain->gcr3_tbl);
-		u64 glx  = domain->glx;
-		u64 tmp;
+	if (!gcr3_info->gcr3_tbl)
+		return;

-		pte_root |= DTE_FLAG_GV;
-		pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+	pr_debug("%s: devid=%#x, glx=%#x, gcr3_tbl=%#llx\n",
+		 __func__, dev_data->devid, gcr3_info->glx,
+		 (unsigned long long)gcr3_info->gcr3_tbl);

-		/* First mask out possible old values for GCR3 table */
-		tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
-		flags &= ~tmp;
+	gcr3 = iommu_virt_to_phys(gcr3_info->gcr3_tbl);

-		tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
-		flags &= ~tmp;
+	target->data[0] |= DTE_FLAG_GV |
+			   FIELD_PREP(DTE_GLX, gcr3_info->glx) |
+			   FIELD_PREP(DTE_GCR3_14_12, gcr3 >> 12);
+	if (pdom_is_v2_pgtbl_mode(dev_data->domain))
+		target->data[0] |= DTE_FLAG_GIOV;

-		/* Encode GCR3 table into DTE */
-		tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
-		pte_root |= tmp;
+	target->data[1] |= FIELD_PREP(DTE_GCR3_30_15, gcr3 >> 15) |
+			   FIELD_PREP(DTE_GCR3_51_31, gcr3 >> 31);

-		tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
-		flags |= tmp;
+	/* Guest page table can only support 4 and 5 levels */
+	if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL)
+		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_5_LEVEL);
+	else
+		target->data[2] |= FIELD_PREP(DTE_GPT_LEVEL_MASK, GUEST_PGTABLE_4_LEVEL);
+}

-		tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
-		flags |= tmp;
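The GCR3 root pointer does not fit in one field: bits 14:12, 30:15 and 51:31 of the physical address land in three separate DTE fields, and FIELD_PREP() shifts each chunk into place against the field mask. The split in plain C (field widths mirror the DTE_GCR3_* names used above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gcr3 = 0x123456789000ULL;	/* 4KB-aligned table address */

	uint64_t gcr3_14_12 = (gcr3 >> 12) & 0x7;	/* 3 bits  -> data[0] */
	uint64_t gcr3_30_15 = (gcr3 >> 15) & 0xffff;	/* 16 bits -> data[1] */
	uint64_t gcr3_51_31 = (gcr3 >> 31) & 0x1fffff;	/* 21 bits -> data[1] */

	printf("%llx %llx %llx\n",
	       (unsigned long long)gcr3_14_12,
	       (unsigned long long)gcr3_30_15,
	       (unsigned long long)gcr3_51_31);
	return 0;
}
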
+static void set_dte_entry(struct amd_iommu *iommu,
+			  struct iommu_dev_data *dev_data)
+{
+	u16 domid;
+	u32 old_domid;
+	struct dev_table_entry *initial_dte;
+	struct dev_table_entry new = {};
+	struct protection_domain *domain = dev_data->domain;
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
+	struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];

-	if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
-		dev_table[devid].data[2] |=
-			((u64)GUEST_PGTABLE_5_LEVEL << DTE_GPT_LEVEL_SHIFT);
-	}
+	if (gcr3_info && gcr3_info->gcr3_tbl)
+		domid = dev_data->gcr3_info.domid;
+	else
+		domid = domain->id;
+
+	make_clear_dte(dev_data, dte, &new);
+
+	if (domain->iop.mode != PAGE_MODE_NONE)
+		new.data[0] |= iommu_virt_to_phys(domain->iop.root);
+
+	new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
+		       << DEV_ENTRY_MODE_SHIFT;
+
+	new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;

-	if (domain->flags & PD_GIOV_MASK)
-		pte_root |= DTE_FLAG_GIOV;
+	/*
+	 * When SNP is enabled, we can only support TV=1 with non-zero domain ID.
+	 * This is prevented by the SNP-enable and IOMMU_DOMAIN_IDENTITY check in
+	 * do_iommu_domain_alloc().
+	 */
+	WARN_ON(amd_iommu_snp_en && (domid == 0));
+	new.data[0] |= DTE_FLAG_TV;
+
+	if (dev_data->ppr)
+		new.data[0] |= 1ULL << DEV_ENTRY_PPR;
+
+	if (domain->dirty_tracking)
+		new.data[0] |= DTE_FLAG_HAD;
+
+	if (dev_data->ats_enabled)
+		new.data[1] |= DTE_FLAG_IOTLB;
+
+	old_domid = READ_ONCE(dte->data[1]) & DEV_DOMID_MASK;
+	new.data[1] |= domid;
+
+	/*
+	 * Restore cached persistent DTE bits, which can be set by information
+	 * in IVRS table. See set_dev_entry_from_acpi().
+	 */
+	initial_dte = amd_iommu_get_ivhd_dte_flags(iommu->pci_seg->id, dev_data->devid);
+	if (initial_dte) {
+		new.data128[0] |= initial_dte->data128[0];
+		new.data128[1] |= initial_dte->data128[1];
	}

-	flags &= ~DEV_DOMID_MASK;
-	flags |= domain->id;
+	set_dte_gcr3_table(iommu, dev_data, &new);

-	old_domid = dev_table[devid].data[1] & DEV_DOMID_MASK;
-	dev_table[devid].data[1]  = flags;
-	dev_table[devid].data[0]  = pte_root;
+	update_dte256(iommu, dev_data, &new);

	/*
	 * A kdump kernel might be replacing a domain ID that was copied from
@@ -1789,76 +2094,143 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
	}
 }

-static void clear_dte_entry(struct amd_iommu *iommu, u16 devid)
+/*
+ * Clear DMA-remap related flags to block all DMA (blocked domain)
+ */
+static void clear_dte_entry(struct amd_iommu *iommu, struct iommu_dev_data *dev_data)
 {
-	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	struct dev_table_entry new = {};
+	struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];

-	/* remove entry from the device table seen by the hardware */
-	dev_table[devid].data[0]  = DTE_FLAG_V;
+	make_clear_dte(dev_data, dte, &new);
+	update_dte256(iommu, dev_data, &new);
+}

-	if (!amd_iommu_snp_en)
-		dev_table[devid].data[0] |= DTE_FLAG_TV;
+/* Update and flush DTE for the given device */
+static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
+{
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);

-	dev_table[devid].data[1] &= DTE_FLAG_MASK;
+	if (set)
+		set_dte_entry(iommu, dev_data);
+	else
+		clear_dte_entry(iommu, dev_data);

-	amd_iommu_apply_erratum_63(iommu, devid);
+	clone_aliases(iommu, dev_data->dev);
+	device_flush_dte(dev_data);
+	iommu_completion_wait(iommu);
 }

-static void do_attach(struct iommu_dev_data *dev_data,
-		      struct protection_domain *domain)
+/*
+ * If domain is SVA capable then initialize GCR3 table. Also if domain is
+ * in v2 page table mode then update GCR3[0].
+ */
+static int init_gcr3_table(struct iommu_dev_data *dev_data,
+			   struct protection_domain *pdom)
 {
-	struct amd_iommu *iommu;
-	bool ats;
-
-	iommu = rlookup_amd_iommu(dev_data->dev);
-	if (!iommu)
-		return;
-	ats   = dev_data->ats_enabled;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	int max_pasids = dev_data->max_pasids;
+	int ret = 0;

-	/* Update data structures */
-	dev_data->domain = domain;
-	list_add(&dev_data->list, &domain->dev_list);
+	/*
+	 * If domain is in pt mode then setup GCR3 table only if device
+	 * is PASID capable
+	 */
+	if (pdom_is_in_pt_mode(pdom) && !pdev_pasid_supported(dev_data))
+		return ret;

-	/* Update NUMA Node ID */
-	if (domain->nid == NUMA_NO_NODE)
-		domain->nid = dev_to_node(dev_data->dev);
+	/*
+	 * By default, setup GCR3 table to support MAX PASIDs
+	 * supported by the device/IOMMU.
+	 */
+	ret = setup_gcr3_table(&dev_data->gcr3_info, iommu,
+			       max_pasids > 0 ? max_pasids : 1);
+	if (ret)
+		return ret;

-	/* Do reference counting */
-	domain->dev_iommu[iommu->index] += 1;
-	domain->dev_cnt                 += 1;
+	/* Setup GCR3[0] only if domain is setup with v2 page table mode */
+	if (!pdom_is_v2_pgtbl_mode(pdom))
+		return ret;

-	/* Update device table */
-	set_dte_entry(iommu, dev_data->devid, domain,
-		      ats, dev_data->ppr);
-	clone_aliases(iommu, dev_data->dev);
+	ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
+	if (ret)
+		free_gcr3_table(&dev_data->gcr3_info);

-	device_flush_dte(dev_data);
+	return ret;
 }

-static void do_detach(struct iommu_dev_data *dev_data)
+static void destroy_gcr3_table(struct iommu_dev_data *dev_data,
+			       struct protection_domain *pdom)
 {
-	struct protection_domain *domain = dev_data->domain;
-	struct amd_iommu *iommu;
+	struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;

-	iommu = rlookup_amd_iommu(dev_data->dev);
-	if (!iommu)
+	if (pdom_is_v2_pgtbl_mode(pdom))
+		update_gcr3(dev_data, 0, 0, false);
+
+	if (gcr3_info->gcr3_tbl == NULL)
		return;

-	/* Update data structures */
-	dev_data->domain = NULL;
-	list_del(&dev_data->list);
-	clear_dte_entry(iommu, dev_data->devid);
-	clone_aliases(iommu, dev_data->dev);
+	free_gcr3_table(gcr3_info);
+}

-	/* Flush the DTE entry */
-	device_flush_dte(dev_data);
+static int pdom_attach_iommu(struct amd_iommu *iommu,
+			     struct protection_domain *pdom)
+{
+	struct pdom_iommu_info *pdom_iommu_info, *curr;
+	unsigned long flags;
+	int ret = 0;

-	/* Flush IOTLB and wait for the flushes to finish */
-	amd_iommu_domain_flush_all(domain);
+	spin_lock_irqsave(&pdom->lock, flags);

-	/* decrease reference counters - needs to happen after the flushes */
-	domain->dev_iommu[iommu->index] -= 1;
-	domain->dev_cnt                 -= 1;
+	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+	if (pdom_iommu_info) {
+		pdom_iommu_info->refcnt++;
+		goto out_unlock;
+	}
+
+	pdom_iommu_info = kzalloc(sizeof(*pdom_iommu_info), GFP_ATOMIC);
+	if (!pdom_iommu_info) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	pdom_iommu_info->iommu = iommu;
+	pdom_iommu_info->refcnt = 1;
+
+	curr = xa_cmpxchg(&pdom->iommu_array, iommu->index,
+			  NULL, pdom_iommu_info, GFP_ATOMIC);
+	if (curr) {
+		kfree(pdom_iommu_info);
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&pdom->lock, flags);
+	return ret;
+}
+
+static void pdom_detach_iommu(struct amd_iommu *iommu,
+			      struct protection_domain *pdom)
+{
+	struct pdom_iommu_info *pdom_iommu_info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pdom->lock, flags);
+
+	pdom_iommu_info = xa_load(&pdom->iommu_array, iommu->index);
+	if (!pdom_iommu_info) {
+		spin_unlock_irqrestore(&pdom->lock, flags);
+		return;
+	}
+
+	pdom_iommu_info->refcnt--;
+	if (pdom_iommu_info->refcnt == 0) {
+		xa_erase(&pdom->iommu_array, iommu->index);
+		kfree(pdom_iommu_info);
+	}
+
+	spin_unlock_irqrestore(&pdom->lock, flags);
 }
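pdom_attach_iommu()/pdom_detach_iommu() implement a small refcounted set keyed by iommu->index: the first device behind a given IOMMU inserts a tracking entry, later attaches only bump refcnt, and the last detach erases it. Since pdom->lock is held, the xa_cmpxchg() cannot actually lose a race here; it still keeps the insert atomic should the locking ever be relaxed. The core of the pattern reduced to a generic get/put pair (kernel xarray API; the caller is assumed to serialize as the driver does):

#include <linux/xarray.h>
#include <linux/slab.h>

struct ref_entry {
	unsigned long refcnt;
};

static DEFINE_XARRAY(ref_set);

static int ref_set_get(unsigned long index)
{
	struct ref_entry *e, *curr;

	e = xa_load(&ref_set, index);
	if (e) {
		e->refcnt++;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_ATOMIC);
	if (!e)
		return -ENOMEM;
	e->refcnt = 1;

	/* Insert only if the slot is still empty. */
	curr = xa_cmpxchg(&ref_set, index, NULL, e, GFP_ATOMIC);
	if (curr) {
		kfree(e);
		return -EBUSY;
	}
	return 0;
}

static void ref_set_put(unsigned long index)
{
	struct ref_entry *e = xa_load(&ref_set, index);

	if (e && --e->refcnt == 0) {
		xa_erase(&ref_set, index);
		kfree(e);
	}
}
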
 /*
@@ -1868,31 +2240,60 @@ static void do_detach(struct iommu_dev_data *dev_data)
 static int attach_device(struct device *dev,
			 struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	struct pci_dev *pdev;
	unsigned long flags;
	int ret = 0;

-	spin_lock_irqsave(&domain->lock, flags);
-
-	dev_data = dev_iommu_priv_get(dev);
-
-	spin_lock(&dev_data->lock);
+	mutex_lock(&dev_data->mutex);

	if (dev_data->domain != NULL) {
		ret = -EBUSY;
		goto out;
	}

-	if (dev_is_pci(dev))
-		pdev_enable_caps(to_pci_dev(dev));
+	/* Do reference counting */
+	ret = pdom_attach_iommu(iommu, domain);
+	if (ret)
+		goto out;

-	do_attach(dev_data, domain);
+	/* Setup GCR3 table */
+	if (pdom_is_sva_capable(domain)) {
+		ret = init_gcr3_table(dev_data, domain);
+		if (ret) {
+			pdom_detach_iommu(iommu, domain);
+			goto out;
+		}
+	}

-out:
-	spin_unlock(&dev_data->lock);
+	pdev = dev_is_pci(dev_data->dev) ? to_pci_dev(dev_data->dev) : NULL;
+	if (pdev && pdom_is_sva_capable(domain)) {
+		pdev_enable_caps(pdev);
+
+		/*
+		 * Device can continue to function even if IOPF
+		 * enablement failed. Hence in error path just
+		 * disable device PRI support.
+		 */
+		if (amd_iommu_iopf_add_device(iommu, dev_data))
+			pdev_disable_cap_pri(pdev);
+	} else if (pdev) {
+		pdev_enable_cap_ats(pdev);
+	}

+	/* Update data structures */
+	dev_data->domain = domain;
+	spin_lock_irqsave(&domain->lock, flags);
+	list_add(&dev_data->list, &domain->dev_list);
	spin_unlock_irqrestore(&domain->lock, flags);

+	/* Update device table */
+	dev_update_dte(dev_data, true);
+
+out:
+	mutex_unlock(&dev_data->mutex);
+
	return ret;
 }

@@ -1901,16 +2302,12 @@ out:
 */
 static void detach_device(struct device *dev)
 {
-	struct protection_domain *domain;
-	struct iommu_dev_data *dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
+	struct protection_domain *domain = dev_data->domain;
	unsigned long flags;

-	dev_data = dev_iommu_priv_get(dev);
-	domain = dev_data->domain;
-
-	spin_lock_irqsave(&domain->lock, flags);
-
-	spin_lock(&dev_data->lock);
+	mutex_lock(&dev_data->mutex);

	/*
	 * First check if the device is still attached. It might already
@@ -1921,21 +2318,43 @@ static void detach_device(struct device *dev)
	if (WARN_ON(!dev_data->domain))
		goto out;

-	do_detach(dev_data);
+	/* Remove IOPF handler */
+	if (dev_data->ppr) {
+		iopf_queue_flush_dev(dev);
+		amd_iommu_iopf_remove_device(iommu, dev_data);
+	}

	if (dev_is_pci(dev))
		pdev_disable_caps(to_pci_dev(dev));

-out:
-	spin_unlock(&dev_data->lock);
+	/* Clear DTE and flush the entry */
+	dev_update_dte(dev_data, false);

+	/* Flush IOTLB and wait for the flushes to finish */
+	spin_lock_irqsave(&domain->lock, flags);
+	amd_iommu_domain_flush_all(domain);
+	list_del(&dev_data->list);
	spin_unlock_irqrestore(&domain->lock, flags);
+
+	/* Clear GCR3 table */
+	if (pdom_is_sva_capable(domain))
+		destroy_gcr3_table(dev_data, domain);
+
+	/* Update data structures */
+	dev_data->domain = NULL;
+
+	/* decrease reference counters - needs to happen after the flushes */
+	pdom_detach_iommu(iommu, domain);
+
+out:
+	mutex_unlock(&dev_data->mutex);
 }

 static struct iommu_device *amd_iommu_probe_device(struct device *dev)
 {
	struct iommu_device *iommu_dev;
	struct amd_iommu *iommu;
+	struct iommu_dev_data *dev_data;
	int ret;

	if (!check_device(dev))
@@ -1954,40 +2373,45 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)

	ret = iommu_init_device(iommu, dev);
	if (ret) {
-		if (ret != -ENOTSUPP)
-			dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
+		dev_err(dev, "Failed to initialize - trying to proceed anyway\n");
		iommu_dev = ERR_PTR(ret);
		iommu_ignore_device(iommu, dev);
-	} else {
-		amd_iommu_set_pci_msi_domain(dev, iommu);
-		iommu_dev = &iommu->iommu;
+		goto out_err;
+	}
+
+	amd_iommu_set_pci_msi_domain(dev, iommu);
+	iommu_dev = &iommu->iommu;
+
+	/*
+	 * If the IOMMU and device support PASID then it will contain the max
+	 * supported PASIDs, else it will be zero.
+	 */
+	dev_data = dev_iommu_priv_get(dev);
+	if (amd_iommu_pasid_supported() && dev_is_pci(dev) &&
+	    pdev_pasid_supported(dev_data)) {
+		dev_data->max_pasids = min_t(u32, iommu->iommu.max_pasids,
+					     pci_max_pasids(to_pci_dev(dev)));
	}

+out_err:
	iommu_completion_wait(iommu);

-	return iommu_dev;
-}
+	if (dev_is_pci(dev))
+		pci_prepare_ats(to_pci_dev(dev), PAGE_SHIFT);

-static void amd_iommu_probe_finalize(struct device *dev)
-{
-	/* Domains are initialized for this device - have a look what we ended up with */
-	set_dma_ops(dev, NULL);
-	iommu_setup_dma_ops(dev, 0, U64_MAX);
+	return iommu_dev;
 }

 static void amd_iommu_release_device(struct device *dev)
 {
-	struct amd_iommu *iommu;
-
-	if (!check_device(dev))
-		return;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

-	iommu = rlookup_amd_iommu(dev);
-	if (!iommu)
-		return;
+	WARN_ON(dev_data->domain);

-	amd_iommu_uninit_device(dev);
-	iommu_completion_wait(iommu);
+	/*
+	 * We keep dev_data around for unplugged devices and reuse it when the
+	 * device is re-plugged - not doing so would introduce a ton of races.
+	 */
 }
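max_pasids ends up as the intersection of the two capability reports: what the IOMMU advertises (iommu->iommu.max_pasids) and what the device's PCI PASID capability allows (pci_max_pasids()); min_t() just does the comparison at u32. Worked out with example numbers:

#include <stdio.h>
#include <stdint.h>

static uint32_t effective_max_pasids(uint32_t iommu_max, uint32_t pci_max)
{
	return iommu_max < pci_max ? iommu_max : pci_max;	/* min_t(u32, ...) */
}

int main(void)
{
	/* e.g. the IOMMU supports 2^16 PASIDs, the device only 2^8 */
	printf("%u\n", effective_max_pasids(1u << 16, 1u << 8));	/* 256 */
	return 0;
}
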
 static struct iommu_group *amd_iommu_device_group(struct device *dev)
@@ -2000,42 +2424,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev)

 /*****************************************************************************
 *
- * The next functions belong to the dma_ops mapping/unmapping code.
- *
- *****************************************************************************/
-
-static void update_device_table(struct protection_domain *domain)
-{
-	struct iommu_dev_data *dev_data;
-
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
-		if (!iommu)
-			continue;
-		set_dte_entry(iommu, dev_data->devid, domain,
-			      dev_data->ats_enabled, dev_data->ppr);
-		clone_aliases(iommu, dev_data->dev);
-	}
-}
-
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
-	update_device_table(domain);
-	domain_flush_devices(domain);
-}
-
-void amd_iommu_domain_update(struct protection_domain *domain)
-{
-	/* Update device table */
-	amd_iommu_update_and_flush_device_table(domain);
-
-	/* Flush domain TLB(s) and wait for completion */
-	amd_iommu_domain_flush_all(domain);
-}
-
-/*****************************************************************************
- *
 * The following functions belong to the exported interface of AMD IOMMU
 *
 * This interface allows access to lower level functions of the IOMMU
@@ -2044,138 +2432,70 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 *
 *****************************************************************************/

-static void cleanup_domain(struct protection_domain *domain)
-{
-	struct iommu_dev_data *entry;
-
-	lockdep_assert_held(&domain->lock);
-
-	if (!domain->dev_cnt)
-		return;
-
-	while (!list_empty(&domain->dev_list)) {
-		entry = list_first_entry(&domain->dev_list,
-					 struct iommu_dev_data, list);
-		BUG_ON(!entry->domain);
-		do_detach(entry);
-	}
-	WARN_ON(domain->dev_cnt != 0);
-}
-
-static void protection_domain_free(struct protection_domain *domain)
+void protection_domain_free(struct protection_domain *domain)
 {
-	if (!domain)
-		return;
-
-	if (domain->iop.pgtbl_cfg.tlb)
-		free_io_pgtable_ops(&domain->iop.iop.ops);
-
-	if (domain->flags & PD_IOMMUV2_MASK)
-		free_gcr3_table(domain);
-
-	if (domain->iop.root)
-		free_page((unsigned long)domain->iop.root);
-
-	if (domain->id)
-		domain_id_free(domain->id);
-
+	WARN_ON(!list_empty(&domain->dev_list));
+	if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
+		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
+	pdom_id_free(domain->id);
	kfree(domain);
 }

-static int protection_domain_init_v1(struct protection_domain *domain, int mode)
+static void protection_domain_init(struct protection_domain *domain)
 {
-	u64 *pt_root = NULL;
-
-	BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
-
-	if (mode != PAGE_MODE_NONE) {
-		pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-		if (!pt_root)
-			return -ENOMEM;
-	}
-
-	amd_iommu_domain_set_pgtable(domain, pt_root, mode);
-
-	return 0;
-}
-
-static int protection_domain_init_v2(struct protection_domain *domain)
-{
-	domain->flags |= PD_GIOV_MASK;
-
-	domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
-
-	if (setup_gcr3_table(domain, 1))
-		return -ENOMEM;
-
-	return 0;
+	spin_lock_init(&domain->lock);
+	INIT_LIST_HEAD(&domain->dev_list);
+	INIT_LIST_HEAD(&domain->dev_data_list);
+	xa_init(&domain->iommu_array);
 }

-static struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(void)
 {
-	struct io_pgtable_ops *pgtbl_ops;
	struct protection_domain *domain;
-	int pgtable;
-	int ret;
+	int domid;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

-	domain->id = domain_id_alloc();
-	if (!domain->id)
-		goto out_err;
+	domid = pdom_id_alloc();
+	if (domid <= 0) {
+		kfree(domain);
+		return NULL;
+	}
+	domain->id = domid;

-	spin_lock_init(&domain->lock);
-	INIT_LIST_HEAD(&domain->dev_list);
-	domain->nid = NUMA_NO_NODE;
+	protection_domain_init(domain);

-	switch (type) {
-	/* No need to allocate io pgtable ops in passthrough mode */
-	case IOMMU_DOMAIN_IDENTITY:
-		return domain;
-	case IOMMU_DOMAIN_DMA:
-		pgtable = amd_iommu_pgtable;
-		break;
-	/*
-	 * Force IOMMU v1 page table when allocating
-	 * domain for pass-through devices.
-	 */
-	case IOMMU_DOMAIN_UNMANAGED:
-		pgtable = AMD_IOMMU_V1;
-		break;
-	default:
-		goto out_err;
-	}
+	return domain;
+}

-	switch (pgtable) {
-	case AMD_IOMMU_V1:
-		ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
-		break;
-	case AMD_IOMMU_V2:
-		ret = protection_domain_init_v2(domain);
+static int pdom_setup_pgtable(struct protection_domain *domain,
+			      struct device *dev)
+{
+	struct io_pgtable_ops *pgtbl_ops;
+	enum io_pgtable_fmt fmt;
+
+	switch (domain->pd_mode) {
+	case PD_MODE_V1:
+		fmt = AMD_IOMMU_V1;
		break;
-	default:
-		ret = -EINVAL;
+	case PD_MODE_V2:
+		fmt = AMD_IOMMU_V2;
		break;
	}

-	if (ret)
-		goto out_err;
-
-	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
+	domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
	if (!pgtbl_ops)
-		goto out_err;
+		return -ENOMEM;

-	return domain;
-out_err:
-	protection_domain_free(domain);
-	return NULL;
+	return 0;
 }

-static inline u64 dma_max_address(void)
+static inline u64 dma_max_address(enum protection_domain_mode pgtable)
 {
-	if (amd_iommu_pgtable == AMD_IOMMU_V1)
+	if (pgtable == PD_MODE_V1)
		return ~0ULL;

	/* V2 with 4/5 level page table */
@@ -2187,99 +2507,145 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 {
	return iommu && (iommu->features & FEATURE_HDSUP);
 }
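The v2 aperture depends on the guest page-table depth, which the hunk above only hints at ("V2 with 4/5 level page table"; the actual return statement falls outside the shown context). For reference, with 9 translation bits per level over a 12-bit page offset, 4 levels cover a 48-bit and 5 levels a 57-bit virtual address space. A sketch of that arithmetic, not the driver's exact expression:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_va(int levels)
{
	/* 12-bit page offset + 9 bits of index per level */
	return (1ULL << (12 + 9 * levels)) - 1;
}

int main(void)
{
	printf("4-level: 0x%llx\n", (unsigned long long)max_va(4)); /* 2^48 - 1 */
	printf("5-level: 0x%llx\n", (unsigned long long)max_va(5)); /* 2^57 - 1 */
	return 0;
}
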
-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-						  struct device *dev, u32 flags)
+static struct iommu_domain *
+do_iommu_domain_alloc(struct device *dev, u32 flags,
+		      enum protection_domain_mode pgtable)
 {
	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
	struct protection_domain *domain;
-	struct amd_iommu *iommu = NULL;
-
-	if (dev) {
-		iommu = rlookup_amd_iommu(dev);
-		if (!iommu)
-			return ERR_PTR(-ENODEV);
-	}
-
-	/*
-	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
-	 * default to use IOMMU_DOMAIN_DMA[_FQ].
-	 */
-	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-		return ERR_PTR(-EINVAL);
-
-	if (dirty_tracking && !amd_iommu_hd_support(iommu))
-		return ERR_PTR(-EOPNOTSUPP);
+	int ret;

-	domain = protection_domain_alloc(type);
+	domain = protection_domain_alloc();
	if (!domain)
		return ERR_PTR(-ENOMEM);

+	domain->pd_mode = pgtable;
+	ret = pdom_setup_pgtable(domain, dev);
+	if (ret) {
+		pdom_id_free(domain->id);
+		kfree(domain);
+		return ERR_PTR(ret);
+	}
+
	domain->domain.geometry.aperture_start = 0;
-	domain->domain.geometry.aperture_end   = dma_max_address();
+	domain->domain.geometry.aperture_end   = dma_max_address(pgtable);
	domain->domain.geometry.force_aperture = true;
+	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;

-	if (iommu) {
-		domain->domain.type = type;
-		domain->domain.pgsize_bitmap = iommu->iommu.ops->pgsize_bitmap;
-		domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
+	domain->domain.ops = iommu->iommu.ops->default_domain_ops;

-		if (dirty_tracking)
-			domain->domain.dirty_ops = &amd_dirty_ops;
-	}
+	if (dirty_tracking)
+		domain->domain.dirty_ops = &amd_dirty_ops;

	return &domain->domain;
 }

-static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type)
+static struct iommu_domain *
+amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
+				    const struct iommu_user_data *user_data)
 {
-	struct iommu_domain *domain;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+				    IOMMU_HWPT_ALLOC_PASID;

-	domain = do_iommu_domain_alloc(type, NULL, 0);
-	if (IS_ERR(domain))
-		return NULL;
+	if ((flags & ~supported_flags) || user_data)
+		return ERR_PTR(-EOPNOTSUPP);

-	return domain;
+	switch (flags & supported_flags) {
+	case IOMMU_HWPT_ALLOC_DIRTY_TRACKING:
+		/* Allocate domain with v1 page table for dirty tracking */
+		if (!amd_iommu_hd_support(iommu))
+			break;
+		return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
+	case IOMMU_HWPT_ALLOC_PASID:
+		/* Allocate domain with v2 page table if IOMMU supports PASID.
+		 */
+		if (!amd_iommu_pasid_supported())
+			break;
+		return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
+	case 0:
+		/* If nothing specific is required use the kernel commandline default */
+		return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
+	default:
+		break;
+	}
+	return ERR_PTR(-EOPNOTSUPP);
 }

-static struct iommu_domain *
-amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
-			    struct iommu_domain *parent,
-			    const struct iommu_user_data *user_data)
+void amd_iommu_domain_free(struct iommu_domain *dom)
+{
+	struct protection_domain *domain = to_pdomain(dom);
+	protection_domain_free(domain);
+}
+
+static int blocked_domain_attach_device(struct iommu_domain *domain,
+					struct device *dev)
 {
-	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);

-	if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data)
-		return ERR_PTR(-EOPNOTSUPP);
+	if (dev_data->domain)
+		detach_device(dev);
+
+	/* Clear DTE and flush the entry */
+	mutex_lock(&dev_data->mutex);
+	dev_update_dte(dev_data, false);
+	mutex_unlock(&dev_data->mutex);

-	return do_iommu_domain_alloc(type, dev, flags);
+	return 0;
 }

-static void amd_iommu_domain_free(struct iommu_domain *dom)
+static int blocked_domain_set_dev_pasid(struct iommu_domain *domain,
+					struct device *dev, ioasid_t pasid,
+					struct iommu_domain *old)
 {
-	struct protection_domain *domain;
-	unsigned long flags;
+	amd_iommu_remove_dev_pasid(dev, pasid, old);
+	return 0;
+}

-	if (!dom)
-		return;
+static struct iommu_domain blocked_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev     = blocked_domain_attach_device,
+		.set_dev_pasid  = blocked_domain_set_dev_pasid,
+	}
+};

-	domain = to_pdomain(dom);
+static struct protection_domain identity_domain;

-	spin_lock_irqsave(&domain->lock, flags);
+static const struct iommu_domain_ops identity_domain_ops = {
+	.attach_dev = amd_iommu_attach_device,
+};

-	cleanup_domain(domain);
+void amd_iommu_init_identity_domain(void)
+{
+	struct iommu_domain *domain = &identity_domain.domain;

-	spin_unlock_irqrestore(&domain->lock, flags);
+	domain->type = IOMMU_DOMAIN_IDENTITY;
+	domain->ops = &identity_domain_ops;
+	domain->owner = &amd_iommu_ops;

-	protection_domain_free(domain);
+	identity_domain.id = pdom_id_alloc();
+
+	protection_domain_init(&identity_domain);
 }

+/* Same as blocked domain except it supports only ops->attach_dev() */
+static struct iommu_domain release_domain = {
+	.type = IOMMU_DOMAIN_BLOCKED,
+	.ops = &(const struct iommu_domain_ops) {
+		.attach_dev     = blocked_domain_attach_device,
+	}
+};
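blocked_domain, identity_domain and release_domain are statically allocated singletons: the core can attach every device to the same object, so there is nothing to refcount or free, and .attach_dev is the only op a blocking domain really needs. A minimal sketch of the shape of such a static domain for a hypothetical driver (the my_* names are not from this patch):

#include <linux/iommu.h>

static int my_blocked_attach_dev(struct iommu_domain *domain,
				 struct device *dev)
{
	/* Quiesce DMA for @dev here, e.g. by clearing its translation entry. */
	return 0;
}

static struct iommu_domain my_blocked_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &(const struct iommu_domain_ops) {
		.attach_dev = my_blocked_attach_dev,
	},
};

The driver then publishes the singleton through iommu_ops (ops->blocked_domain and friends), as amd_iommu_ops does further down in this diff.
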
 static int amd_iommu_attach_device(struct iommu_domain *dom,
				   struct device *dev)
 {
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
	struct protection_domain *domain = to_pdomain(dom);
-	struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
	int ret;

	/*
@@ -2312,8 +2678,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
	}
 #endif

-	iommu_completion_wait(iommu);
-
	return ret;
 }

@@ -2321,7 +2685,7 @@ static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
				    unsigned long iova, size_t size)
 {
	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;

	if (ops->map_pages)
		domain_flush_np_cache(domain, iova, size);
@@ -2333,11 +2697,11 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
			       int iommu_prot, gfp_t gfp, size_t *mapped)
 {
	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
	int prot = 0;
	int ret = -EINVAL;

-	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+	if ((domain->pd_mode == PD_MODE_V1) &&
	    (domain->iop.mode == PAGE_MODE_NONE))
		return -EINVAL;

@@ -2380,10 +2744,10 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova
				    struct iommu_iotlb_gather *gather)
 {
	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
	size_t r;

-	if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
+	if ((domain->pd_mode == PD_MODE_V1) &&
	    (domain->iop.mode == PAGE_MODE_NONE))
		return 0;

@@ -2399,7 +2763,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
					  dma_addr_t iova)
 {
	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;

	return ops->iova_to_phys(ops, iova);
 }
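These three callbacks are thin wrappers: the domain embeds a struct io_pgtable whose ops were returned by alloc_io_pgtable_ops() in pdom_setup_pgtable(), and the driver just forwards into them. A hedged sketch of one such forwarding call (single 4KB page; the real callers pass ranges and accumulate *mapped):

#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

static int example_map_one(struct io_pgtable_ops *ops, unsigned long iova,
			   phys_addr_t paddr)
{
	size_t mapped = 0;

	/* Map one 4KB page read/write through the io-pgtable layer. */
	return ops->map_pages(ops, iova, paddr, SZ_4K, 1,
			      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
}
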
@@ -2418,7 +2782,7 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
	case IOMMU_CAP_DEFERRED_FLUSH:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING: {
-		struct amd_iommu *iommu = rlookup_amd_iommu(dev);
+		struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);

		return amd_iommu_hd_support(iommu);
	}
@@ -2433,12 +2797,12 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
					bool enable)
 {
	struct protection_domain *pdomain = to_pdomain(domain);
-	struct dev_table_entry *dev_table;
+	struct dev_table_entry *dte;
	struct iommu_dev_data *dev_data;
	bool domain_flush = false;
	struct amd_iommu *iommu;
	unsigned long flags;
-	u64 pte_root;
+	u64 new;

	spin_lock_irqsave(&pdomain->lock, flags);
	if (!(pdomain->dirty_tracking ^ enable)) {
@@ -2447,18 +2811,15 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
	}

	list_for_each_entry(dev_data, &pdomain->dev_list, list) {
-		iommu = rlookup_amd_iommu(dev_data->dev);
-		if (!iommu)
-			continue;
-
-		dev_table = get_dev_table(iommu);
-		pte_root = dev_table[dev_data->devid].data[0];
-
-		pte_root = (enable ? pte_root | DTE_FLAG_HAD :
-				     pte_root & ~DTE_FLAG_HAD);
+		spin_lock(&dev_data->dte_lock);
+		iommu = get_amd_iommu_from_dev_data(dev_data);
+		dte = &get_dev_table(iommu)[dev_data->devid];
+		new = dte->data[0];
+		new = (enable ? new | DTE_FLAG_HAD : new & ~DTE_FLAG_HAD);
+		dte->data[0] = new;
+		spin_unlock(&dev_data->dte_lock);

		/* Flush device DTE */
-		dev_table[dev_data->devid].data[0] = pte_root;
		device_flush_dte(dev_data);
		domain_flush = true;
	}
@@ -2479,7 +2840,7 @@ static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
					  struct iommu_dirty_bitmap *dirty)
 {
	struct protection_domain *pdomain = to_pdomain(domain);
-	struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
	unsigned long lflags;

	if (!ops || !ops->read_and_clear_dirty)
@@ -2509,9 +2870,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
		return;
	devid = PCI_SBDF_TO_DEVID(sbdf);

-	iommu = rlookup_amd_iommu(dev);
-	if (!iommu)
-		return;
+	iommu = get_amd_iommu_from_dev(dev);
	pci_seg = iommu->pci_seg;

	list_for_each_entry(entry, &pci_seg->unity_map, list) {
@@ -2556,7 +2915,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
	list_add_tail(&region->list, head);
 }

-bool amd_iommu_is_attach_deferred(struct device *dev)
+static bool amd_iommu_is_attach_deferred(struct device *dev)
 {
	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2593,6 +2952,10 @@ static int amd_iommu_def_domain_type(struct device *dev)
	if (!dev_data)
		return 0;

+	/* Always use DMA domain for untrusted device */
+	if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
+		return IOMMU_DOMAIN_DMA;
+
	/*
	 * Do not identity map IOMMUv2 capable devices when:
	 *   - memory encryption is active, because some of those devices
@@ -2620,18 +2983,54 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
	.read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
 };

+static int amd_iommu_dev_enable_feature(struct device *dev,
+					enum iommu_dev_features feat)
+{
+	int ret = 0;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+	case IOMMU_DEV_FEAT_SVA:
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+static int amd_iommu_dev_disable_feature(struct device *dev,
+					 enum iommu_dev_features feat)
+{
+	int ret = 0;
+
+	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+	case IOMMU_DEV_FEAT_SVA:
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
 const struct iommu_ops amd_iommu_ops = {
	.capable = amd_iommu_capable,
-	.domain_alloc = amd_iommu_domain_alloc,
-	.domain_alloc_user = amd_iommu_domain_alloc_user,
+	.blocked_domain = &blocked_domain,
+	.release_domain = &release_domain,
+	.identity_domain = &identity_domain.domain,
+	.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
+	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
	.probe_device = amd_iommu_probe_device,
	.release_device = amd_iommu_release_device,
-	.probe_finalize = amd_iommu_probe_finalize,
	.device_group = amd_iommu_device_group,
	.get_resv_regions = amd_iommu_get_resv_regions,
	.is_attach_deferred = amd_iommu_is_attach_deferred,
-	.pgsize_bitmap = AMD_IOMMU_PGSIZES,
	.def_domain_type = amd_iommu_def_domain_type,
+	.dev_enable_feat = amd_iommu_dev_enable_feature,
+	.dev_disable_feat = amd_iommu_dev_disable_feature,
+	.page_response = amd_iommu_page_response,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev     = amd_iommu_attach_device,
		.map_pages      = amd_iommu_map_pages,
@@ -2645,216 +3044,6 @@ const struct iommu_ops amd_iommu_ops = {
	}
 };
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
-			 u64 address, size_t size)
-{
-	struct iommu_dev_data *dev_data;
-	struct iommu_cmd cmd;
-	int i, ret;
-
-	if (!(domain->flags & PD_IOMMUV2_MASK))
-		return -EINVAL;
-
-	build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
-	/*
-	 * IOMMU TLB needs to be flushed before Device TLB to
-	 * prevent device TLB refill from IOMMU TLB
-	 */
-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (domain->dev_iommu[i] == 0)
-			continue;
-
-		ret = iommu_queue_command(amd_iommus[i], &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until IOMMU TLB flushes are complete */
-	amd_iommu_domain_flush_complete(domain);
-
-	/* Now flush device TLBs */
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct amd_iommu *iommu;
-		int qdep;
-
-		/*
-		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
-		 * domain.
-		 */
-		if (!dev_data->ats_enabled)
-			continue;
-
-		qdep  = dev_data->ats_qdep;
-		iommu = rlookup_amd_iommu(dev_data->dev);
-		if (!iommu)
-			continue;
-		build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
-				      address, size, pasid, true);
-
-		ret = iommu_queue_command(iommu, &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until all device TLBs are flushed */
-	amd_iommu_domain_flush_complete(domain);
-
-	ret = 0;
-
-out:
-	return ret;
-}
-
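The ordering contract in the removed __flush_pasid() above is worth spelling out: the IOMMU TLB must be invalidated, and that invalidation completed, before any device ATC is touched, or a device could refill its cache from IOTLB entries that are about to disappear. A schematic sketch of the two-stage protocol — queue_iommu_flush(), queue_device_flush() and wait_for_flush_completion() are hypothetical stand-ins for iommu_queue_command() and amd_iommu_domain_flush_complete():

/*
 * Sketch of the two-stage flush that __flush_pasid() implemented.
 * All three helpers here are hypothetical stand-ins.
 */
static int flush_pasid_sketch(struct protection_domain *domain, u32 pasid,
			      u64 address, size_t size)
{
	struct iommu_dev_data *dev_data;
	int ret;

	/* Stage 1: invalidate the IOTLB on every IOMMU serving the domain. */
	ret = queue_iommu_flush(domain, pasid, address, size);
	if (ret)
		return ret;

	/* The wait is the point: devices must not refill their ATCs from
	 * stale IOTLB entries. */
	wait_for_flush_completion(domain);

	/* Stage 2: only now invalidate the per-device ATS caches. */
	list_for_each_entry(dev_data, &domain->dev_list, list) {
		if (!dev_data->ats_enabled)	/* non-ATS devices have no ATC */
			continue;
		ret = queue_device_flush(dev_data, pasid, address, size);
		if (ret)
			return ret;
	}

	wait_for_flush_completion(domain);
	return 0;
}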
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
-				  u64 address)
-{
-	return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
-			 u64 address)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_page(domain, pasid, address);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
-	return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_tlb(domain, pasid);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
-static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
-{
-	int index;
-	u64 *pte;
-
-	while (true) {
-
-		index = (pasid >> (9 * level)) & 0x1ff;
-		pte   = &root[index];
-
-		if (level == 0)
-			break;
-
-		if (!(*pte & GCR3_VALID)) {
-			if (!alloc)
-				return NULL;
-
-			root = (void *)get_zeroed_page(GFP_ATOMIC);
-			if (root == NULL)
-				return NULL;
-
-			*pte = iommu_virt_to_phys(root) | GCR3_VALID;
-		}
-
-		root = iommu_phys_to_virt(*pte & PAGE_MASK);
-
-		level -= 1;
-	}
-
-	return pte;
-}
-
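The removed __get_gcr3_pte() above consumes 9 bits of the PASID per table level, top level first. A self-contained worked example of that index arithmetic, assuming a 3-level table and an arbitrarily chosen PASID of 0x12345:

/* Worked example of the removed __get_gcr3_pte() index math:
 * each level consumes 9 bits of the PASID. Plain userspace C. */
#include <stdio.h>

int main(void)
{
	unsigned int pasid = 0x12345;
	int level;

	for (level = 2; level >= 0; level--)
		printf("level %d: index 0x%03x\n",
		       level, (pasid >> (9 * level)) & 0x1ff);
	/*
	 * Output:
	 *   level 2: index 0x000
	 *   level 1: index 0x091
	 *   level 0: index 0x145
	 */
	return 0;
}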
-static int __set_gcr3(struct protection_domain *domain, u32 pasid,
-		      unsigned long cr3)
-{
-	u64 *pte;
-
-	if (domain->iop.mode != PAGE_MODE_NONE)
-		return -EINVAL;
-
-	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
-	if (pte == NULL)
-		return -ENOMEM;
-
-	*pte = (cr3 & PAGE_MASK) | GCR3_VALID;
-
-	return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-static int __clear_gcr3(struct protection_domain *domain, u32 pasid)
-{
-	u64 *pte;
-
-	if (domain->iop.mode != PAGE_MODE_NONE)
-		return -EINVAL;
-
-	pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
-	if (pte == NULL)
-		return 0;
-
-	*pte = 0;
-
-	return __amd_iommu_flush_tlb(domain, pasid);
-}
-
-int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
-			      unsigned long cr3)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __set_gcr3(domain, pasid, cr3);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
-int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __clear_gcr3(domain, pasid);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
-int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
-			   int status, int tag)
-{
-	struct iommu_dev_data *dev_data;
-	struct amd_iommu *iommu;
-	struct iommu_cmd cmd;
-
-	dev_data = dev_iommu_priv_get(&pdev->dev);
-	iommu    = rlookup_amd_iommu(&pdev->dev);
-	if (!iommu)
-		return -ENODEV;
-
-	build_complete_ppr(&cmd, dev_data->devid, pasid, status,
-			   tag, dev_data->pri_tlp);
-
-	return iommu_queue_command(iommu, &cmd);
-}
-
 #ifdef CONFIG_IRQ_REMAP
 
 /*****************************************************************************
@@ -2877,7 +3066,7 @@ static void iommu_flush_irt_and_complete(struct amd_iommu *iommu, u16 devid)
 		return;
 
 	build_inv_irt(&cmd, devid);
-	data = atomic64_add_return(1, &iommu->cmd_sem_val);
+	data = atomic64_inc_return(&iommu->cmd_sem_val);
 	build_completion_wait(&cmd2, iommu, data);
 
 	raw_spin_lock_irqsave(&iommu->lock, flags);
@@ -2895,17 +3084,23 @@ out:
 static void set_dte_irq_entry(struct amd_iommu *iommu, u16 devid,
 			      struct irq_remap_table *table)
 {
-	u64 dte;
-	struct dev_table_entry *dev_table = get_dev_table(iommu);
+	u64 new;
+	struct dev_table_entry *dte = &get_dev_table(iommu)[devid];
+	struct iommu_dev_data *dev_data = search_dev_data(iommu, devid);
 
-	dte	= dev_table[devid].data[2];
-	dte	&= ~DTE_IRQ_PHYS_ADDR_MASK;
-	dte	|= iommu_virt_to_phys(table->table);
-	dte	|= DTE_IRQ_REMAP_INTCTL;
-	dte	|= DTE_INTTABLEN;
-	dte	|= DTE_IRQ_REMAP_ENABLE;
+	if (dev_data)
+		spin_lock(&dev_data->dte_lock);
 
-	dev_table[devid].data[2] = dte;
+	new = READ_ONCE(dte->data[2]);
+	new &= ~DTE_IRQ_PHYS_ADDR_MASK;
+	new |= iommu_virt_to_phys(table->table);
+	new |= DTE_IRQ_REMAP_INTCTL;
+	new |= DTE_INTTABLEN;
+	new |= DTE_IRQ_REMAP_ENABLE;
+	WRITE_ONCE(dte->data[2], new);
+
+	if (dev_data)
+		spin_unlock(&dev_data->dte_lock);
 }
 
 static struct irq_remap_table *get_irq_table(struct amd_iommu *iommu, u16 devid)
@@ -3479,7 +3674,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		irq_data->chip_data = data;
 		irq_data->chip = &amd_ir_chip;
 		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
-		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
 	}
 
 	return 0;
@@ -3753,20 +3947,11 @@ static struct irq_chip amd_ir_chip = {
 };
 
 static const struct msi_parent_ops amdvi_msi_parent_ops = {
-	.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
-			   MSI_FLAG_MULTI_PCI_MSI |
-			   MSI_FLAG_PCI_IMS,
+	.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
 	.prefix = "IR-",
 	.init_dev_msi_info = msi_parent_init_dev_msi_info,
 };
 
-static const struct msi_parent_ops virt_amdvi_msi_parent_ops = {
-	.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
-			   MSI_FLAG_MULTI_PCI_MSI,
-	.prefix = "vIR-",
-	.init_dev_msi_info = msi_parent_init_dev_msi_info,
-};
-
 int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 {
 	struct fwnode_handle *fn;
@@ -3784,11 +3969,7 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
 	irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
 	iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
 				   IRQ_DOMAIN_FLAG_ISOLATED_MSI;
-
-	if (amd_iommu_np_cache)
-		iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
-	else
-		iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
+	iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
 
 	return 0;
 }
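On the atomic64_inc_return() conversion in the iommu_flush_irt_and_complete() hunk above: atomic64_inc_return(&v) is equivalent to atomic64_add_return(1, &v), so each flush still draws a unique completion-wait sequence number. A sketch of that handshake — queue_cmd() and sem_value() are hypothetical stand-ins for the driver's real command-queue submission and semaphore read-back:

/*
 * Sketch of the completion-wait handshake around an IRT flush.
 * queue_cmd() and sem_value() are hypothetical stand-ins.
 */
static void flush_irt_sketch(struct amd_iommu *iommu, u16 devid)
{
	struct iommu_cmd inv, wait;
	u64 seq;

	build_inv_irt(&inv, devid);

	/* A fresh sequence number per flush; inc_return is the
	 * add_return(1, ...) it replaced. */
	seq = atomic64_inc_return(&iommu->cmd_sem_val);
	build_completion_wait(&wait, iommu, seq);

	queue_cmd(iommu, &inv);
	queue_cmd(iommu, &wait);

	/* The IOMMU writes @seq back once both commands have retired. */
	while (sem_value(iommu) < seq)
		cpu_relax();
}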