Diffstat (limited to 'drivers/iommu')
42 files changed, 842 insertions, 366 deletions
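The first hunks below rework the AMD driver's debugfs support: amd_iommu_debugfs_setup() now runs once after full initialization and creates per-IOMMU mmio, capability and cmdbuf files plus driver-wide devid, devtbl and irqtbl files. As a rough sketch of the intended usage from userspace — assuming debugfs is mounted at /sys/kernel/debug, and using 0x18 as a purely illustrative register offset, neither of which is part of this diff — the mmio file takes an offset on write and reports the register value on read:

/*
 * Hypothetical userspace sketch (not part of the patch): exercise the
 * new per-IOMMU "mmio" debugfs file from drivers/iommu/amd/debugfs.c.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/iommu/amd/iommu00/mmio";
	char buf[64];
	ssize_t n;
	int fd;

	/* Select a register: iommu_mmio_write() parses the offset with
	 * kstrtou32_from_user() and rejects anything past mmio_phys_end. */
	fd = open(path, O_WRONLY);
	if (fd < 0 || write(fd, "0x18", 4) != 4)
		return 1;
	close(fd);

	/* Read back: iommu_mmio_show() prints "Offset:0x18 Value:0x...". */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}

The capability file follows the same write-an-offset-then-read pattern (bounds-checked to 0x14, the last IOMMU capability register), while cmdbuf, devtbl and irqtbl are read-only; the latter two are keyed by whatever source ID was last written to the shared devid file.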
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 0a33d995d15d..70d29b14d851 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -200,6 +200,7 @@ source "drivers/iommu/riscv/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI + select IRQ_MSI_LIB help Supports Interrupt remapping for IO-APIC and MSI devices. To use x2apic mode in the CPUs which support x2APIC enhancements or @@ -305,7 +306,6 @@ config APPLE_DART depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_DART select IOMMU_API select IOMMU_IO_PGTABLE_DART - default ARCH_APPLE help Support for Apple DART (Device Address Resolution Table) IOMMUs found in Apple ARM SoCs like the M1. diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig index 994063e5586f..ecef69c11144 100644 --- a/drivers/iommu/amd/Kconfig +++ b/drivers/iommu/amd/Kconfig @@ -7,6 +7,7 @@ config AMD_IOMMU select PCI_ATS select PCI_PRI select PCI_PASID + select IRQ_MSI_LIB select MMU_NOTIFIER select IOMMU_API select IOMMU_IOVA diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 29a8864381c3..9b4b589a54b5 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -28,9 +28,9 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu, gfp_t gfp, size_t size); #ifdef CONFIG_AMD_IOMMU_DEBUGFS -void amd_iommu_debugfs_setup(struct amd_iommu *iommu); +void amd_iommu_debugfs_setup(void); #else -static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {} +static inline void amd_iommu_debugfs_setup(void) {} #endif /* Needed for interrupt remapping */ @@ -42,7 +42,9 @@ int amd_iommu_enable_faulting(unsigned int cpu); extern int amd_iommu_guest_ir; extern enum protection_domain_mode amd_iommu_pgtable; extern int amd_iommu_gpt_level; +extern u8 amd_iommu_hpt_level; extern unsigned long amd_iommu_pgsize_bitmap; +extern bool amd_iommu_hatdis; /* Protection domain ops */ void amd_iommu_init_identity_domain(void); diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index ccbab3a4811a..687542608272 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -94,6 +94,7 @@ #define FEATURE_GA BIT_ULL(7) #define FEATURE_HE BIT_ULL(8) #define FEATURE_PC BIT_ULL(9) +#define FEATURE_HATS GENMASK_ULL(11, 10) #define FEATURE_GATS GENMASK_ULL(13, 12) #define FEATURE_GLX GENMASK_ULL(15, 14) #define FEATURE_GAM_VAPIC BIT_ULL(21) @@ -460,6 +461,9 @@ /* IOMMU Feature Reporting Field (for IVHD type 10h) */ #define IOMMU_FEAT_GASUP_SHIFT 6 +/* IOMMU HATDIS for IVHD type 11h and 40h */ +#define IOMMU_IVHD_ATTR_HATDIS_SHIFT 0 + /* IOMMU Extended Feature Register (EFR) */ #define IOMMU_EFR_XTSUP_SHIFT 2 #define IOMMU_EFR_GASUP_SHIFT 7 @@ -558,7 +562,8 @@ struct amd_io_pgtable { }; enum protection_domain_mode { - PD_MODE_V1 = 1, + PD_MODE_NONE, + PD_MODE_V1, PD_MODE_V2, }; @@ -790,6 +795,8 @@ struct amd_iommu { #ifdef CONFIG_AMD_IOMMU_DEBUGFS /* DebugFS Info */ struct dentry *debugfs; + int dbg_mmio_offset; + int dbg_cap_offset; #endif /* IOPF support */ @@ -891,6 +898,13 @@ struct dev_table_entry { }; /* + * Structure defining one entry in the command buffer + */ +struct iommu_cmd { + u32 data[4]; +}; + +/* * Structure to store persistent DTE flags from IVHD */ struct ivhd_dte_flags { diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c index 545372fcc72f..10fa217a7119 100644 --- a/drivers/iommu/amd/debugfs.c +++ b/drivers/iommu/amd/debugfs.c @@ -11,22
+11,382 @@ #include <linux/pci.h> #include "amd_iommu.h" +#include "../irq_remapping.h" static struct dentry *amd_iommu_debugfs; -static DEFINE_MUTEX(amd_iommu_debugfs_lock); #define MAX_NAME_LEN 20 +#define OFS_IN_SZ 8 +#define DEVID_IN_SZ 16 -void amd_iommu_debugfs_setup(struct amd_iommu *iommu) +static int sbdf = -1; + +static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct amd_iommu *iommu = m->private; + int ret; + + iommu->dbg_mmio_offset = -1; + + if (cnt > OFS_IN_SZ) + return -EINVAL; + + ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset); + if (ret) + return ret; + + if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - 4) { + iommu->dbg_mmio_offset = -1; + return -EINVAL; + } + + return cnt; +} + +static int iommu_mmio_show(struct seq_file *m, void *unused) +{ + struct amd_iommu *iommu = m->private; + u64 value; + + if (iommu->dbg_mmio_offset < 0) { + seq_puts(m, "Please provide mmio register's offset\n"); + return 0; + } + + value = readq(iommu->mmio_base + iommu->dbg_mmio_offset); + seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value); + + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio); + +static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct amd_iommu *iommu = m->private; + int ret; + + iommu->dbg_cap_offset = -1; + + if (cnt > OFS_IN_SZ) + return -EINVAL; + + ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_cap_offset); + if (ret) + return ret; + + /* Capability register at offset 0x14 is the last IOMMU capability register. */ + if (iommu->dbg_cap_offset > 0x14) { + iommu->dbg_cap_offset = -1; + return -EINVAL; + } + + return cnt; +} + +static int iommu_capability_show(struct seq_file *m, void *unused) +{ + struct amd_iommu *iommu = m->private; + u32 value; + int err; + + if (iommu->dbg_cap_offset < 0) { + seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n"); + return 0; + } + + err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + iommu->dbg_cap_offset, &value); + if (err) { + seq_printf(m, "Not able to read capability register at 0x%x\n", + iommu->dbg_cap_offset); + return 0; + } + + seq_printf(m, "Offset:0x%x Value:0x%08x\n", iommu->dbg_cap_offset, value); + + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability); + +static int iommu_cmdbuf_show(struct seq_file *m, void *unused) +{ + struct amd_iommu *iommu = m->private; + struct iommu_cmd *cmd; + unsigned long flag; + u32 head, tail; + int i; + + raw_spin_lock_irqsave(&iommu->lock, flag); + head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n", + (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff); + for (i = 0; i < CMD_BUFFER_ENTRIES; i++) { + cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd)); + seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0], + cmd->data[1], cmd->data[2], cmd->data[3]); + } + raw_spin_unlock_irqrestore(&iommu->lock, flag); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf); + +static ssize_t devid_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct amd_iommu_pci_seg *pci_seg; + int seg, bus, slot, func; + struct amd_iommu *iommu; + char *srcid_ptr; + u16 devid; + int i; + + sbdf = -1; + + if (cnt >= DEVID_IN_SZ) + return -EINVAL; 
+ + srcid_ptr = memdup_user_nul(ubuf, cnt); + if (IS_ERR(srcid_ptr)) + return PTR_ERR(srcid_ptr); + + i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func); + if (i != 4) { + i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func); + if (i != 3) { + kfree(srcid_ptr); + return -EINVAL; + } + seg = 0; + } + + devid = PCI_DEVID(bus, PCI_DEVFN(slot, func)); + + /* Check if user device id input is a valid input */ + for_each_pci_segment(pci_seg) { + if (pci_seg->id != seg) + continue; + if (devid > pci_seg->last_bdf) { + kfree(srcid_ptr); + return -EINVAL; + } + iommu = pci_seg->rlookup_table[devid]; + if (!iommu) { + kfree(srcid_ptr); + return -ENODEV; + } + break; + } + + if (pci_seg->id != seg) { + kfree(srcid_ptr); + return -EINVAL; + } + + sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid); + + kfree(srcid_ptr); + + return cnt; +} + +static int devid_show(struct seq_file *m, void *unused) { + u16 devid; + + if (sbdf >= 0) { + devid = PCI_SBDF_TO_DEVID(sbdf); + seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf), + PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid)); + } else + seq_puts(m, "No or Invalid input provided\n"); + + return 0; +} +DEFINE_SHOW_STORE_ATTRIBUTE(devid); + +static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid) +{ + struct dev_table_entry *dev_table; + struct amd_iommu *iommu; + + iommu = pci_seg->rlookup_table[devid]; + if (!iommu) + return; + + dev_table = get_dev_table(iommu); + if (!dev_table) { + seq_puts(m, "Device table not found"); + return; + } + + seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId", + "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]"); + seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid), + PCI_SLOT(devid), PCI_FUNC(devid)); + for (int i = 3; i >= 0; --i) + seq_printf(m, "%016llx ", dev_table[devid].data[i]); + seq_printf(m, "iommu%d\n", iommu->index); +} + +static int iommu_devtbl_show(struct seq_file *m, void *unused) +{ + struct amd_iommu_pci_seg *pci_seg; + u16 seg, devid; + + if (sbdf < 0) { + seq_puts(m, "Enter a valid device ID to 'devid' file\n"); + return 0; + } + seg = PCI_SBDF_TO_SEGID(sbdf); + devid = PCI_SBDF_TO_DEVID(sbdf); + + for_each_pci_segment(pci_seg) { + if (pci_seg->id != seg) + continue; + dump_dte(m, pci_seg, devid); + break; + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(iommu_devtbl); + +static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len) +{ + struct irte_ga *ptr, *irte; + int index; + + for (index = 0; index < int_tab_len; index++) { + ptr = (struct irte_ga *)table->table; + irte = &ptr[index]; + + if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && + !irte->lo.fields_vapic.valid) + continue; + else if (!irte->lo.fields_remap.valid) + continue; + seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val); + } +} + +static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len) +{ + union irte *ptr, *irte; + int index; + + for (index = 0; index < int_tab_len; index++) { + ptr = (union irte *)table->table; + irte = &ptr[index]; + + if (!irte->fields.valid) + continue; + seq_printf(m, "IRT[%04d] %08x\n", index, irte->val); + } +} + +static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg) +{ + struct dev_table_entry *dev_table; + struct irq_remap_table *table; + struct amd_iommu *iommu; + unsigned long flags; + u16 int_tab_len; + + table = pci_seg->irq_lookup_table[devid]; + if (!table) { + seq_printf(m, "IRQ lookup table not set for 
%04x:%02x:%02x.%x\n", + pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid)); + return; + } + + iommu = pci_seg->rlookup_table[devid]; + if (!iommu) + return; + + dev_table = get_dev_table(iommu); + if (!dev_table) { + seq_puts(m, "Device table not found"); + return; + } + + int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK; + if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) { + seq_puts(m, "The device's DTE contains an invalid IRT length value."); + return; + } + + seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid), + PCI_SLOT(devid), PCI_FUNC(devid)); + + raw_spin_lock_irqsave(&table->lock, flags); + if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) + dump_128_irte(m, table, BIT(int_tab_len >> 1)); + else + dump_32_irte(m, table, BIT(int_tab_len >> 1)); + seq_puts(m, "\n"); + raw_spin_unlock_irqrestore(&table->lock, flags); +} + +static int iommu_irqtbl_show(struct seq_file *m, void *unused) +{ + struct amd_iommu_pci_seg *pci_seg; + u16 devid, seg; + + if (!irq_remapping_enabled) { + seq_puts(m, "Interrupt remapping is disabled\n"); + return 0; + } + + if (sbdf < 0) { + seq_puts(m, "Enter a valid device ID to 'devid' file\n"); + return 0; + } + + seg = PCI_SBDF_TO_SEGID(sbdf); + devid = PCI_SBDF_TO_DEVID(sbdf); + + for_each_pci_segment(pci_seg) { + if (pci_seg->id != seg) + continue; + dump_irte(m, devid, pci_seg); + break; + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl); + +void amd_iommu_debugfs_setup(void) +{ + struct amd_iommu *iommu; char name[MAX_NAME_LEN + 1]; - mutex_lock(&amd_iommu_debugfs_lock); - if (!amd_iommu_debugfs) - amd_iommu_debugfs = debugfs_create_dir("amd", - iommu_debugfs_dir); - mutex_unlock(&amd_iommu_debugfs_lock); + amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir); + + for_each_iommu(iommu) { + iommu->dbg_mmio_offset = -1; + iommu->dbg_cap_offset = -1; + + snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index); + iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs); + + debugfs_create_file("mmio", 0644, iommu->debugfs, iommu, + &iommu_mmio_fops); + debugfs_create_file("capability", 0644, iommu->debugfs, iommu, + &iommu_capability_fops); + debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu, + &iommu_cmdbuf_fops); + } - snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index); - iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs); + debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL, + &devid_fops); + debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL, + &iommu_devtbl_fops); + debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL, + &iommu_irqtbl_fops); } diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 9c17dfa76703..7b5af6176de9 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -152,6 +152,8 @@ bool amd_iommu_dump; bool amd_iommu_irq_remap __read_mostly; enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1; +/* Host page table level */ +u8 amd_iommu_hpt_level; /* Guest page table level */ int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL; @@ -168,6 +170,9 @@ static int amd_iommu_target_ivhd_type; u64 amd_iommu_efr; u64 amd_iommu_efr2; +/* Host (v1) page table is not supported */ +bool amd_iommu_hatdis; + /* SNP is enabled on the system?
*/ bool amd_iommu_snp_en; EXPORT_SYMBOL(amd_iommu_snp_en); @@ -1792,6 +1797,11 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h, if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; + if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) { + pr_warn_once("Host Address Translation is not supported.\n"); + amd_iommu_hatdis = true; + } + early_iommu_features_init(iommu, h); break; @@ -2112,7 +2122,15 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) return ret; } - iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); + ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); + if (ret || amd_iommu_pgtable == PD_MODE_NONE) { + /* + * Remove sysfs if DMA translation is not supported by the + * IOMMU. Do not return an error to enable IRQ remapping + * in state_next(), DTE[V, TV] must eventually be set to 0. + */ + iommu_device_sysfs_remove(&iommu->iommu); + } return pci_enable_device(iommu->dev); } @@ -2573,7 +2591,7 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg) u32 devid; struct dev_table_entry *dev_table = pci_seg->dev_table; - if (dev_table == NULL) + if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE) return; for (devid = 0; devid <= pci_seg->last_bdf; ++devid) { @@ -3033,6 +3051,7 @@ static int __init early_amd_iommu_init(void) struct acpi_table_header *ivrs_base; int ret; acpi_status status; + u8 efr_hats; if (!amd_iommu_detected) return -ENODEV; @@ -3077,6 +3096,19 @@ static int __init early_amd_iommu_init(void) FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL) amd_iommu_gpt_level = PAGE_MODE_5_LEVEL; + efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr); + if (efr_hats != 0x3) { + /* + * efr[HATS] bits specify the maximum host translation level + * supported, with LEVEL 4 being initial max level. + */ + amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL; + } else { + pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n", + efr_hats); + amd_iommu_hatdis = true; + } + if (amd_iommu_pgtable == PD_MODE_V2) { if (!amd_iommu_v2_pgtbl_supported()) { pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n"); @@ -3084,6 +3116,17 @@ static int __init early_amd_iommu_init(void) } } + if (amd_iommu_hatdis) { + /* + * Host (v1) page table is not available. Attempt to use + * Guest (v2) page table. 
+ */ + if (amd_iommu_v2_pgtbl_supported()) + amd_iommu_pgtable = PD_MODE_V2; + else + amd_iommu_pgtable = PD_MODE_NONE; + } + /* Disable any previously enabled IOMMUs */ if (!is_kdump_kernel() || amd_iommu_disabled) disable_iommus(); @@ -3376,7 +3419,6 @@ int amd_iommu_enable_faulting(unsigned int cpu) */ static int __init amd_iommu_init(void) { - struct amd_iommu *iommu; int ret; ret = iommu_go_to_state(IOMMU_INITIALIZED); @@ -3390,8 +3432,8 @@ static int __init amd_iommu_init(void) } #endif - for_each_iommu(iommu) - amd_iommu_debugfs_setup(iommu); + if (!ret) + amd_iommu_debugfs_setup(); return ret; } diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c index 4d308c071134..a91e71f981ef 100644 --- a/drivers/iommu/amd/io_pgtable.c +++ b/drivers/iommu/amd/io_pgtable.c @@ -125,7 +125,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable, goto out; ret = false; - if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL)) + if (WARN_ON_ONCE(pgtable->mode == amd_iommu_hpt_level)) goto out; *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root)); @@ -526,7 +526,7 @@ static void v1_free_pgtable(struct io_pgtable *iop) /* Page-table is not visible to IOMMU anymore, so free it */ BUG_ON(pgtable->mode < PAGE_MODE_NONE || - pgtable->mode > PAGE_MODE_6_LEVEL); + pgtable->mode > amd_iommu_hpt_level); free_sub_pt(pgtable->root, pgtable->mode, &freelist); iommu_put_pages_list(&freelist); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 3117d99cf83d..b9c01e102e50 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -25,6 +25,7 @@ #include <linux/notifier.h> #include <linux/export.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/msi.h> #include <linux/irqdomain.h> #include <linux/percpu.h> @@ -63,13 +64,6 @@ static const struct iommu_dirty_ops amd_dirty_ops; int amd_iommu_max_glx_val = -1; /* - * general struct to manage commands send to an IOMMU - */ -struct iommu_cmd { - u32 data[4]; -}; - -/* * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap * to know which ones are already in use. */ @@ -634,8 +628,8 @@ static inline void pdev_disable_cap_pasid(struct pci_dev *pdev) static void pdev_enable_caps(struct pci_dev *pdev) { - pdev_enable_cap_ats(pdev); pdev_enable_cap_pasid(pdev); + pdev_enable_cap_ats(pdev); pdev_enable_cap_pri(pdev); } @@ -2424,6 +2418,13 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) pci_max_pasids(to_pci_dev(dev))); } + if (amd_iommu_pgtable == PD_MODE_NONE) { + pr_warn_once("%s: DMA translation not supported by iommu.\n", + __func__); + iommu_dev = ERR_PTR(-ENODEV); + goto out_err; + } + out_err: iommu_completion_wait(iommu); @@ -2511,6 +2512,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain, case PD_MODE_V2: fmt = AMD_IOMMU_V2; break; + case PD_MODE_NONE: + WARN_ON_ONCE(1); + return -EPERM; } domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev); @@ -2524,14 +2528,30 @@ static int pdom_setup_pgtable(struct protection_domain *domain, static inline u64 dma_max_address(enum protection_domain_mode pgtable) { if (pgtable == PD_MODE_V1) - return ~0ULL; + return PM_LEVEL_SIZE(amd_iommu_hpt_level); - /* V2 with 4/5 level page table */ - return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1); + /* + * V2 with 4/5 level page table. 
Note that "2.2.6.5 AMD64 4-Kbyte Page + * Translation" shows that the V2 table sign extends the top of the + * address space creating a reserved region in the middle of the + * translation, just like the CPU does. Further Vasant says the docs are + * incomplete and this only applies to non-zero PASIDs. If the AMDv2 + * page table is assigned to the 0 PASID then there is no sign extension + * check. + * + * Since the IOMMU must have a fixed geometry, and the core code does + * not understand sign extended addressing, we have to chop off the high + * bit to get consistent behavior with attachments of the domain to any + * PASID. + */ + return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1); } static bool amd_iommu_hd_support(struct amd_iommu *iommu) { + if (amd_iommu_hatdis) + return false; + return iommu && (iommu->features & FEATURE_HDSUP); } @@ -3970,29 +3990,30 @@ static struct irq_chip amd_ir_chip = { static const struct msi_parent_ops amdvi_msi_parent_ops = { .supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI, + .bus_select_token = DOMAIN_BUS_AMDVI, + .bus_select_mask = MATCH_PCI_MSI, .prefix = "IR-", .init_dev_msi_info = msi_parent_init_dev_msi_info, }; int amd_iommu_create_irq_domain(struct amd_iommu *iommu) { - struct fwnode_handle *fn; + struct irq_domain_info info = { + .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index), + .ops = &amd_ir_domain_ops, + .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI, + .host_data = iommu, + .parent = arch_get_ir_parent_domain(), + }; - fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index); - if (!fn) + if (!info.fwnode) return -ENOMEM; - iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0, - fn, &amd_ir_domain_ops, iommu); + + iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); + irq_domain_free_fwnode(info.fwnode); return -ENOMEM; } - - irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI); - iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | - IRQ_DOMAIN_FLAG_ISOLATED_MSI; - iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops; - return 0; } diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 757d24f67ad4..190f28d76615 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -991,7 +991,6 @@ static const struct iommu_ops apple_dart_iommu_ops = { .of_xlate = apple_dart_of_xlate, .def_domain_type = apple_dart_def_domain_type, .get_resv_regions = apple_dart_get_resv_regions, - .pgsize_bitmap = -1UL, /* Restricted during dart probe */ .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = apple_dart_attach_dev_paging, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 0601dece0a0d..59a480974d80 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -220,6 +220,9 @@ bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) feat_mask |= ARM_SMMU_FEAT_VAX; } + if (system_supports_bbml2_noabort()) + feat_mask |= ARM_SMMU_FEAT_BBML2; + if ((smmu->features & feat_mask) != feat_mask) return false; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 10cc6dc26b7b..f39bd7235011 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -38,7 +38,7 @@ 
module_param(disable_msipolling, bool, 0444); MODULE_PARM_DESC(disable_msipolling, "Disable MSI-based polling for CMD_SYNC completion."); -static struct iommu_ops arm_smmu_ops; +static const struct iommu_ops arm_smmu_ops; static struct iommu_dirty_ops arm_smmu_dirty_ops; enum arm_smmu_msi_index { @@ -2906,8 +2906,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL); if (!master_domain) { - kfree(state->vmaster); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_vmaster; } master_domain->domain = new_domain; master_domain->master = master; @@ -2941,7 +2941,6 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state, !arm_smmu_master_canwbs(master)) { spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - kfree(state->vmaster); ret = -EINVAL; goto err_iopf; } @@ -2967,6 +2966,8 @@ err_iopf: arm_smmu_disable_iopf(master, master_domain); err_free_master_domain: kfree(master_domain); +err_free_vmaster: + kfree(state->vmaster); return ret; } @@ -3674,7 +3675,7 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } -static struct iommu_ops arm_smmu_ops = { +static const struct iommu_ops arm_smmu_ops = { .identity_domain = &arm_smmu_identity_domain, .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, @@ -3690,7 +3691,6 @@ static struct iommu_ops arm_smmu_ops = { .def_domain_type = arm_smmu_def_domain_type, .viommu_alloc = arm_vsmmu_alloc, .user_pasid_table = 1, - .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, @@ -4457,6 +4457,9 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (FIELD_GET(IDR3_FWB, reg)) smmu->features |= ARM_SMMU_FEAT_S2FWB; + if (FIELD_GET(IDR3_BBM, reg) == 2) + smmu->features |= ARM_SMMU_FEAT_BBML2; + /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); @@ -4504,11 +4507,6 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->oas = 48; } - if (arm_smmu_ops.pgsize_bitmap == -1UL) - arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; - else - arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; - /* Set the DMA mask for our table walker */ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas))) dev_warn(smmu->dev, diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index ea41d790463e..a33bf520ba97 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -60,6 +60,7 @@ struct arm_smmu_device; #define ARM_SMMU_IDR3 0xc #define IDR3_FWB (1 << 8) #define IDR3_RIL (1 << 10) +#define IDR3_BBM GENMASK(12, 11) #define ARM_SMMU_IDR5 0x14 #define IDR5_STALL_MAX GENMASK(31, 16) @@ -755,6 +756,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HA (1 << 21) #define ARM_SMMU_FEAT_HD (1 << 22) #define ARM_SMMU_FEAT_S2FWB (1 << 23) +#define ARM_SMMU_FEAT_BBML2 (1 << 24) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 62874b18f645..57c097e87613 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -355,7 +355,8 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, priv->set_prr_addr = NULL; if (of_device_is_compatible(np, "qcom,smmu-500") && - of_device_is_compatible(np, "qcom,adreno-smmu")) 
{ + !of_device_is_compatible(np, "qcom,sm8250-smmu-500") && + of_device_is_compatible(np, "qcom,adreno-smmu")) { priv->set_prr_bit = qcom_adreno_smmu_set_prr_bit; priv->set_prr_addr = qcom_adreno_smmu_set_prr_addr; } @@ -379,6 +380,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,sdm670-mdss" }, { .compatible = "qcom,sdm845-mdss" }, { .compatible = "qcom,sdm845-mss-pil" }, + { .compatible = "qcom,sm6115-mdss" }, { .compatible = "qcom,sm6350-mdss" }, { .compatible = "qcom,sm6375-mdss" }, { .compatible = "qcom,sm8150-mdss" }, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 8d95b14c7d5a..4ced4b5bee4d 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -109,7 +109,7 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) } static struct platform_driver arm_smmu_driver; -static struct iommu_ops arm_smmu_ops; +static const struct iommu_ops arm_smmu_ops; #ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS static struct device_node *dev_get_dev_node(struct device *dev) @@ -919,6 +919,8 @@ static void arm_smmu_destroy_domain_context(struct arm_smmu_domain *smmu_domain) static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) { struct arm_smmu_domain *smmu_domain; + struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); + struct arm_smmu_device *smmu = cfg->smmu; /* * Allocate the domain and initialise some of its data structures. @@ -931,6 +933,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev) mutex_init(&smmu_domain->init_mutex); spin_lock_init(&smmu_domain->cb_lock); + smmu_domain->domain.pgsize_bitmap = smmu->pgsize_bitmap; return &smmu_domain->domain; } @@ -1627,7 +1630,7 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } -static struct iommu_ops arm_smmu_ops = { +static const struct iommu_ops arm_smmu_ops = { .identity_domain = &arm_smmu_identity_domain, .blocked_domain = &arm_smmu_blocked_domain, .capable = arm_smmu_capable, @@ -1639,7 +1642,6 @@ static struct iommu_ops arm_smmu_ops = { .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .def_domain_type = arm_smmu_def_domain_type, - .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = arm_smmu_attach_dev, @@ -1919,10 +1921,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) smmu->pgsize_bitmap |= SZ_64K | SZ_512M; - if (arm_smmu_ops.pgsize_bitmap == -1UL) - arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; - else - arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", smmu->pgsize_bitmap); diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 3907924646a2..c5be95e56031 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -229,7 +229,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, goto out_unlock; pgtbl_cfg = (struct io_pgtable_cfg) { - .pgsize_bitmap = qcom_iommu_ops.pgsize_bitmap, + .pgsize_bitmap = domain->pgsize_bitmap, .ias = 32, .oas = 40, .tlb = &qcom_flush_ops, @@ -246,8 +246,6 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, goto out_clear_iommu; } - /* Update the domain's page sizes to reflect the page table format */ 
- domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; domain->geometry.aperture_end = (1ULL << pgtbl_cfg.ias) - 1; domain->geometry.force_aperture = true; @@ -337,6 +335,7 @@ static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev) mutex_init(&qcom_domain->init_mutex); spin_lock_init(&qcom_domain->pgtbl_lock); + qcom_domain->domain.pgsize_bitmap = SZ_4K; return &qcom_domain->domain; } @@ -598,7 +597,6 @@ static const struct iommu_ops qcom_iommu_ops = { .probe_device = qcom_iommu_probe_device, .device_group = generic_device_group, .of_xlate = qcom_iommu_of_xlate, - .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = qcom_iommu_attach_dev, .map_pages = qcom_iommu_map, diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index fcb6a0f7c082..b6edd178fe25 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -22,6 +22,7 @@ #include <linux/pm_runtime.h> #include <linux/slab.h> +#include "dma-iommu.h" #include "iommu-pages.h" typedef u32 sysmmu_iova_t; @@ -925,6 +926,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev) spin_lock_init(&domain->pgtablelock); INIT_LIST_HEAD(&domain->clients); + domain->domain.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE; + domain->domain.geometry.aperture_start = 0; domain->domain.geometry.aperture_end = ~0UL; domain->domain.geometry.force_aperture = true; @@ -1477,7 +1480,7 @@ static const struct iommu_ops exynos_iommu_ops = { .device_group = generic_device_group, .probe_device = exynos_iommu_probe_device, .release_device = exynos_iommu_release_device, - .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, + .get_resv_regions = iommu_dma_get_resv_regions, .of_xlate = exynos_iommu_of_xlate, .default_domain_ops = &(const struct iommu_domain_ops) { .attach_dev = exynos_iommu_attach_device, diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c index 761ab647f372..0961ac805944 100644 --- a/drivers/iommu/hyperv-iommu.c +++ b/drivers/iommu/hyperv-iommu.c @@ -193,15 +193,13 @@ struct hyperv_root_ir_data { static void hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) { - u64 status; - u32 vector; - struct irq_cfg *cfg; - int ioapic_id; - const struct cpumask *affinity; - int cpu; - struct hv_interrupt_entry entry; struct hyperv_root_ir_data *data = irq_data->chip_data; + struct hv_interrupt_entry entry; + const struct cpumask *affinity; struct IO_APIC_route_entry e; + struct irq_cfg *cfg; + int cpu, ioapic_id; + u32 vector; cfg = irqd_cfg(irq_data); affinity = irq_data_get_effective_affinity_mask(irq_data); @@ -214,23 +212,16 @@ hyperv_root_ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg) && data->entry.ioapic_rte.as_uint64) { entry = data->entry; - status = hv_unmap_ioapic_interrupt(ioapic_id, &entry); - - if (status != HV_STATUS_SUCCESS) - hv_status_debug(status, "failed to unmap\n"); + (void)hv_unmap_ioapic_interrupt(ioapic_id, &entry); data->entry.ioapic_rte.as_uint64 = 0; data->entry.source = 0; /* Invalid source */ } - status = hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu, - vector, &entry); - - if (status != HV_STATUS_SUCCESS) { - hv_status_err(status, "map failed\n"); + if (hv_map_ioapic_interrupt(ioapic_id, data->is_level, cpu, + vector, &entry)) return; - } data->entry = entry; @@ -322,10 +313,10 @@ static void hyperv_root_irq_remapping_free(struct irq_domain *domain, data = irq_data->chip_data; e = &data->entry; - if 
(e->source == HV_DEVICE_TYPE_IOAPIC - && e->ioapic_rte.as_uint64) - hv_unmap_ioapic_interrupt(data->ioapic_id, - &data->entry); + if (e->source == HV_DEVICE_TYPE_IOAPIC && + e->ioapic_rte.as_uint64) + (void)hv_unmap_ioapic_interrupt(data->ioapic_id, + &data->entry); kfree(data); } diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c index 47692cbfaabd..265e7290256b 100644 --- a/drivers/iommu/intel/cache.c +++ b/drivers/iommu/intel/cache.c @@ -370,7 +370,7 @@ static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag * struct intel_iommu *iommu = tag->iommu; u64 type = DMA_TLB_PSI_FLUSH; - if (domain->use_first_level) { + if (intel_domain_is_fs_paging(domain)) { qi_batch_add_piotlb(iommu, tag->domain_id, tag->pasid, addr, pages, ih, domain->qi_batch); return; @@ -422,22 +422,6 @@ static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_ domain->qi_batch); } -static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag) -{ - struct intel_iommu *iommu = tag->iommu; - struct device_domain_info *info; - u16 sid; - - info = dev_iommu_priv_get(tag->dev); - sid = PCI_DEVID(info->bus, info->devfn); - - qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, - MAX_AGAW_PFN_WIDTH, domain->qi_batch); - if (info->dtlb_extra_inval) - qi_batch_add_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0, - MAX_AGAW_PFN_WIDTH, domain->qi_batch); -} - /* * Invalidates a range of IOVA from @start (inclusive) to @end (inclusive) * when the memory mappings in the target domain have been modified. @@ -450,7 +434,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, struct cache_tag *tag; unsigned long flags; - addr = calculate_psi_aligned_address(start, end, &pages, &mask); + if (start == 0 && end == ULONG_MAX) { + addr = 0; + pages = -1; + mask = MAX_AGAW_PFN_WIDTH; + } else { + addr = calculate_psi_aligned_address(start, end, &pages, &mask); + } spin_lock_irqsave(&domain->cache_lock, flags); list_for_each_entry(tag, &domain->cache_tags, node) { @@ -491,31 +481,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start, */ void cache_tag_flush_all(struct dmar_domain *domain) { - struct intel_iommu *iommu = NULL; - struct cache_tag *tag; - unsigned long flags; - - spin_lock_irqsave(&domain->cache_lock, flags); - list_for_each_entry(tag, &domain->cache_tags, node) { - if (iommu && iommu != tag->iommu) - qi_batch_flush_descs(iommu, domain->qi_batch); - iommu = tag->iommu; - - switch (tag->type) { - case CACHE_TAG_IOTLB: - case CACHE_TAG_NESTING_IOTLB: - cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0); - break; - case CACHE_TAG_DEVTLB: - case CACHE_TAG_NESTING_DEVTLB: - cache_tag_flush_devtlb_all(domain, tag); - break; - } - - trace_cache_tag_flush_all(tag); - } - qi_batch_flush_descs(iommu, domain->qi_batch); - spin_unlock_irqrestore(&domain->cache_lock, flags); + cache_tag_flush_range(domain, 0, ULONG_MAX, 0); } /* @@ -545,7 +511,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start, qi_batch_flush_descs(iommu, domain->qi_batch); iommu = tag->iommu; - if (!cap_caching_mode(iommu->cap) || domain->use_first_level) { + if (!cap_caching_mode(iommu->cap) || + intel_domain_is_fs_paging(domain)) { iommu_flush_write_buffer(iommu); continue; } diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index b61d9ea27aa9..ec975c73cfe6 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -935,14 +935,11 @@ void __init 
detect_intel_iommu(void) pci_request_acs(); } -#ifdef CONFIG_X86 if (!ret) { x86_init.iommu.iommu_init = intel_iommu_init; x86_platform.iommu_shutdown = intel_iommu_shutdown; } -#endif - if (dmar_tbl) { acpi_put_table(dmar_tbl); dmar_tbl = NULL; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 148b944143b8..f7a00af6778e 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -57,6 +57,8 @@ static void __init check_tylersburg_isoch(void); static int rwbf_quirk; +#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap)) + /* * set to 1 to panic kernel if can't successfully enable VT-d * (used when kernel is launched w/ TXT) @@ -1391,28 +1393,10 @@ void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu) if (--info->refcnt == 0) { ida_free(&iommu->domain_ida, info->did); xa_erase(&domain->iommu_array, iommu->seq_id); - domain->nid = NUMA_NO_NODE; kfree(info); } } -static void domain_exit(struct dmar_domain *domain) -{ - if (domain->pgd) { - struct iommu_pages_list freelist = - IOMMU_PAGES_LIST_INIT(freelist); - - domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); - iommu_put_pages_list(&freelist); - } - - if (WARN_ON(!list_empty(&domain->devices))) - return; - - kfree(domain->qi_batch); - kfree(domain); -} - /* * For kdump cases, old valid entries may be cached due to the * in-flight DMA and copied pgtable, but there is no unmapping @@ -1480,6 +1464,9 @@ static int domain_context_mapping_one(struct dmar_domain *domain, struct context_entry *context; int ret; + if (WARN_ON(!intel_domain_is_ss_paging(domain))) + return -EINVAL; + pr_debug("Set context mapping for %02x:%02x.%d\n", bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); @@ -1736,15 +1723,14 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 intel_context_flush_no_pasid(info, context, did); } -int __domain_setup_first_level(struct intel_iommu *iommu, - struct device *dev, ioasid_t pasid, - u16 did, pgd_t *pgd, int flags, - struct iommu_domain *old) +int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev, + ioasid_t pasid, u16 did, phys_addr_t fsptptr, + int flags, struct iommu_domain *old) { if (!old) - return intel_pasid_setup_first_level(iommu, dev, pgd, - pasid, did, flags); - return intel_pasid_replace_first_level(iommu, dev, pgd, pasid, did, + return intel_pasid_setup_first_level(iommu, dev, fsptptr, pasid, + did, flags); + return intel_pasid_replace_first_level(iommu, dev, fsptptr, pasid, did, iommu_domain_did(old, iommu), flags); } @@ -1793,7 +1779,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu, return __domain_setup_first_level(iommu, dev, pasid, domain_id_iommu(domain, iommu), - (pgd_t *)pgd, flags, old); + __pa(pgd), flags, old); } static int dmar_domain_attach_device(struct dmar_domain *domain, @@ -1819,12 +1805,14 @@ static int dmar_domain_attach_device(struct dmar_domain *domain, if (!sm_supported(iommu)) ret = domain_context_mapping(domain, dev); - else if (domain->use_first_level) + else if (intel_domain_is_fs_paging(domain)) ret = domain_setup_first_level(iommu, domain, dev, IOMMU_NO_PASID, NULL); - else + else if (intel_domain_is_ss_paging(domain)) ret = domain_setup_second_level(iommu, domain, dev, IOMMU_NO_PASID, NULL); + else if (WARN_ON(true)) + ret = -EINVAL; if (ret) goto out_block_translation; @@ -3286,10 +3274,14 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st spin_lock_init(&domain->lock); 
spin_lock_init(&domain->cache_lock); xa_init(&domain->iommu_array); + INIT_LIST_HEAD(&domain->s1_domains); + spin_lock_init(&domain->s1_lock); domain->nid = dev_to_node(dev); domain->use_first_level = first_stage; + domain->domain.type = IOMMU_DOMAIN_UNMANAGED; + /* calculate the address width */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -3331,71 +3323,168 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st } static struct iommu_domain * -intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, - const struct iommu_user_data *user_data) +intel_iommu_domain_alloc_first_stage(struct device *dev, + struct intel_iommu *iommu, u32 flags) +{ + struct dmar_domain *dmar_domain; + + if (flags & ~IOMMU_HWPT_ALLOC_PASID) + return ERR_PTR(-EOPNOTSUPP); + + /* Only SL is available in legacy mode */ + if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) + return ERR_PTR(-EOPNOTSUPP); + + dmar_domain = paging_domain_alloc(dev, true); + if (IS_ERR(dmar_domain)) + return ERR_CAST(dmar_domain); + + dmar_domain->domain.ops = &intel_fs_paging_domain_ops; + /* + * iotlb sync for map is only needed for legacy implementations that + * explicitly require flushing internal write buffers to ensure memory + * coherence. + */ + if (rwbf_required(iommu)) + dmar_domain->iotlb_sync_map = true; + + return &dmar_domain->domain; +} + +static struct iommu_domain * +intel_iommu_domain_alloc_second_stage(struct device *dev, + struct intel_iommu *iommu, u32 flags) { - struct device_domain_info *info = dev_iommu_priv_get(dev); - bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; - bool nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; - struct intel_iommu *iommu = info->iommu; struct dmar_domain *dmar_domain; - struct iommu_domain *domain; - bool first_stage; if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING | IOMMU_HWPT_ALLOC_PASID))) return ERR_PTR(-EOPNOTSUPP); - if (nested_parent && !nested_supported(iommu)) + + if (((flags & IOMMU_HWPT_ALLOC_NEST_PARENT) && + !nested_supported(iommu)) || + ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) && + !ssads_supported(iommu))) return ERR_PTR(-EOPNOTSUPP); - if (user_data || (dirty_tracking && !ssads_supported(iommu))) + + /* Legacy mode always supports second stage */ + if (sm_supported(iommu) && !ecap_slts(iommu->ecap)) return ERR_PTR(-EOPNOTSUPP); + dmar_domain = paging_domain_alloc(dev, false); + if (IS_ERR(dmar_domain)) + return ERR_CAST(dmar_domain); + + dmar_domain->domain.ops = &intel_ss_paging_domain_ops; + dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; + + if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) + dmar_domain->domain.dirty_ops = &intel_dirty_ops; + /* - * Always allocate the guest compatible page table unless - * IOMMU_HWPT_ALLOC_NEST_PARENT or IOMMU_HWPT_ALLOC_DIRTY_TRACKING - * is specified. + * Besides the internal write buffer flush, the caching mode used for + * legacy nested translation (which utilizes shadowing page tables) + * also requires iotlb sync on map. 
*/ - if (nested_parent || dirty_tracking) { - if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) - return ERR_PTR(-EOPNOTSUPP); - first_stage = false; - } else { - first_stage = first_level_by_default(iommu); - } + if (rwbf_required(iommu) || cap_caching_mode(iommu->cap)) + dmar_domain->iotlb_sync_map = true; - dmar_domain = paging_domain_alloc(dev, first_stage); - if (IS_ERR(dmar_domain)) - return ERR_CAST(dmar_domain); - domain = &dmar_domain->domain; - domain->type = IOMMU_DOMAIN_UNMANAGED; - domain->owner = &intel_iommu_ops; - domain->ops = intel_iommu_ops.default_domain_ops; - - if (nested_parent) { - dmar_domain->nested_parent = true; - INIT_LIST_HEAD(&dmar_domain->s1_domains); - spin_lock_init(&dmar_domain->s1_lock); - } + return &dmar_domain->domain; +} - if (dirty_tracking) { - if (dmar_domain->use_first_level) { - iommu_domain_free(domain); - return ERR_PTR(-EOPNOTSUPP); - } - domain->dirty_ops = &intel_dirty_ops; - } +static struct iommu_domain * +intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, + const struct iommu_user_data *user_data) +{ + struct device_domain_info *info = dev_iommu_priv_get(dev); + struct intel_iommu *iommu = info->iommu; + struct iommu_domain *domain; - return domain; + if (user_data) + return ERR_PTR(-EOPNOTSUPP); + + /* Prefer first stage if possible by default. */ + domain = intel_iommu_domain_alloc_first_stage(dev, iommu, flags); + if (domain != ERR_PTR(-EOPNOTSUPP)) + return domain; + return intel_iommu_domain_alloc_second_stage(dev, iommu, flags); } static void intel_iommu_domain_free(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); - WARN_ON(dmar_domain->nested_parent && - !list_empty(&dmar_domain->s1_domains)); - domain_exit(dmar_domain); + if (WARN_ON(dmar_domain->nested_parent && + !list_empty(&dmar_domain->s1_domains))) + return; + + if (WARN_ON(!list_empty(&dmar_domain->devices))) + return; + + if (dmar_domain->pgd) { + struct iommu_pages_list freelist = + IOMMU_PAGES_LIST_INIT(freelist); + + domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw), + &freelist); + iommu_put_pages_list(&freelist); + } + + kfree(dmar_domain->qi_batch); + kfree(dmar_domain); +} + +static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain, + struct intel_iommu *iommu) +{ + if (WARN_ON(dmar_domain->domain.dirty_ops || + dmar_domain->nested_parent)) + return -EINVAL; + + /* Only SL is available in legacy mode */ + if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) + return -EINVAL; + + /* Same page size support */ + if (!cap_fl1gp_support(iommu->cap) && + (dmar_domain->domain.pgsize_bitmap & SZ_1G)) + return -EINVAL; + + /* iotlb sync on map requirement */ + if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map) + return -EINVAL; + + return 0; +} + +static int +paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain, + struct intel_iommu *iommu) +{ + unsigned int sslps = cap_super_page_val(iommu->cap); + + if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu)) + return -EINVAL; + if (dmar_domain->nested_parent && !nested_supported(iommu)) + return -EINVAL; + + /* Legacy mode always supports second stage */ + if (sm_supported(iommu) && !ecap_slts(iommu->ecap)) + return -EINVAL; + + /* Same page size support */ + if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M)) + return -EINVAL; + if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G)) + return -EINVAL; + + /* iotlb sync on map requirement */ + if ((rwbf_required(iommu) 
|| cap_caching_mode(iommu->cap)) && + !dmar_domain->iotlb_sync_map) + return -EINVAL; + + return 0; } int paging_domain_compatible(struct iommu_domain *domain, struct device *dev) @@ -3403,28 +3492,29 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev) struct device_domain_info *info = dev_iommu_priv_get(dev); struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu = info->iommu; + int ret = -EINVAL; int addr_width; - if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) - return -EPERM; + if (intel_domain_is_fs_paging(dmar_domain)) + ret = paging_domain_compatible_first_stage(dmar_domain, iommu); + else if (intel_domain_is_ss_paging(dmar_domain)) + ret = paging_domain_compatible_second_stage(dmar_domain, iommu); + else if (WARN_ON(true)) + ret = -EINVAL; + if (ret) + return ret; + /* + * FIXME this is locked wrong, it needs to be under the + * dmar_domain->lock + */ if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) return -EINVAL; - if (domain->dirty_ops && !ssads_supported(iommu)) - return -EINVAL; - if (dmar_domain->iommu_coherency != iommu_paging_structure_coherency(iommu)) return -EINVAL; - if (dmar_domain->iommu_superpage != - iommu_superpage_capability(iommu, dmar_domain->use_first_level)) - return -EINVAL; - - if (dmar_domain->use_first_level && - (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) - return -EINVAL; /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); @@ -3610,44 +3700,41 @@ static bool domain_support_force_snooping(struct dmar_domain *domain) return support; } -static void domain_set_force_snooping(struct dmar_domain *domain) +static bool intel_iommu_enforce_cache_coherency_fs(struct iommu_domain *domain) { + struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct device_domain_info *info; - assert_spin_locked(&domain->lock); - /* - * Second level page table supports per-PTE snoop control. The - * iommu_map() interface will handle this by setting SNP bit. - */ - if (!domain->use_first_level) { - domain->set_pte_snp = true; - return; - } + guard(spinlock_irqsave)(&dmar_domain->lock); - list_for_each_entry(info, &domain->devices, link) + if (dmar_domain->force_snooping) + return true; + + if (!domain_support_force_snooping(dmar_domain)) + return false; + + dmar_domain->force_snooping = true; + list_for_each_entry(info, &dmar_domain->devices, link) intel_pasid_setup_page_snoop_control(info->iommu, info->dev, IOMMU_NO_PASID); + return true; } -static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain) +static bool intel_iommu_enforce_cache_coherency_ss(struct iommu_domain *domain) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); - unsigned long flags; - - if (dmar_domain->force_snooping) - return true; - spin_lock_irqsave(&dmar_domain->lock, flags); + guard(spinlock_irqsave)(&dmar_domain->lock); if (!domain_support_force_snooping(dmar_domain) || - (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { - spin_unlock_irqrestore(&dmar_domain->lock, flags); + dmar_domain->has_mappings) return false; - } - domain_set_force_snooping(dmar_domain); + /* + * Second level page table supports per-PTE snoop control. The + * iommu_map() interface will handle this by setting SNP bit. 
+ */ + dmar_domain->set_pte_snp = true; dmar_domain->force_snooping = true; - spin_unlock_irqrestore(&dmar_domain->lock, flags); - return true; } @@ -3954,7 +4041,10 @@ static bool risky_device(struct pci_dev *pdev) static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) { - cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + + if (dmar_domain->iotlb_sync_map) + cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1); return 0; } @@ -4000,8 +4090,8 @@ static int blocking_domain_set_dev_pasid(struct iommu_domain *domain, { struct device_domain_info *info = dev_iommu_priv_get(dev); - iopf_for_domain_remove(old, dev); intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); + iopf_for_domain_remove(old, dev); domain_remove_dev_pasid(old, dev, pasid); return 0; @@ -4078,12 +4168,15 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain, if (ret) goto out_remove_dev_pasid; - if (dmar_domain->use_first_level) + if (intel_domain_is_fs_paging(dmar_domain)) ret = domain_setup_first_level(iommu, dmar_domain, dev, pasid, old); - else + else if (intel_domain_is_ss_paging(dmar_domain)) ret = domain_setup_second_level(iommu, dmar_domain, dev, pasid, old); + else if (WARN_ON(true)) + ret = -EINVAL; + if (ret) goto out_unwind_iopf; @@ -4358,6 +4451,32 @@ static struct iommu_domain identity_domain = { }, }; +const struct iommu_domain_ops intel_fs_paging_domain_ops = { + .attach_dev = intel_iommu_attach_device, + .set_dev_pasid = intel_iommu_set_dev_pasid, + .map_pages = intel_iommu_map_pages, + .unmap_pages = intel_iommu_unmap_pages, + .iotlb_sync_map = intel_iommu_iotlb_sync_map, + .flush_iotlb_all = intel_flush_iotlb_all, + .iotlb_sync = intel_iommu_tlb_sync, + .iova_to_phys = intel_iommu_iova_to_phys, + .free = intel_iommu_domain_free, + .enforce_cache_coherency = intel_iommu_enforce_cache_coherency_fs, +}; + +const struct iommu_domain_ops intel_ss_paging_domain_ops = { + .attach_dev = intel_iommu_attach_device, + .set_dev_pasid = intel_iommu_set_dev_pasid, + .map_pages = intel_iommu_map_pages, + .unmap_pages = intel_iommu_unmap_pages, + .iotlb_sync_map = intel_iommu_iotlb_sync_map, + .flush_iotlb_all = intel_flush_iotlb_all, + .iotlb_sync = intel_iommu_tlb_sync, + .iova_to_phys = intel_iommu_iova_to_phys, + .free = intel_iommu_domain_free, + .enforce_cache_coherency = intel_iommu_enforce_cache_coherency_ss, +}; + const struct iommu_ops intel_iommu_ops = { .blocked_domain = &blocking_domain, .release_domain = &blocking_domain, @@ -4374,20 +4493,7 @@ const struct iommu_ops intel_iommu_ops = { .device_group = intel_iommu_device_group, .is_attach_deferred = intel_iommu_is_attach_deferred, .def_domain_type = device_def_domain_type, - .pgsize_bitmap = SZ_4K, .page_response = intel_iommu_page_response, - .default_domain_ops = &(const struct iommu_domain_ops) { - .attach_dev = intel_iommu_attach_device, - .set_dev_pasid = intel_iommu_set_dev_pasid, - .map_pages = intel_iommu_map_pages, - .unmap_pages = intel_iommu_unmap_pages, - .iotlb_sync_map = intel_iommu_iotlb_sync_map, - .flush_iotlb_all = intel_flush_iotlb_all, - .iotlb_sync = intel_iommu_tlb_sync, - .iova_to_phys = intel_iommu_iova_to_phys, - .free = intel_iommu_domain_free, - .enforce_cache_coherency = intel_iommu_enforce_cache_coherency, - } }; static void quirk_iommu_igfx(struct pci_dev *dev) diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 2d1afab5eedc..d09b92871659 100644 
--- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -614,6 +614,9 @@ struct dmar_domain { u8 has_mappings:1; /* Has mappings configured through * iommu_map() interface. */ + u8 iotlb_sync_map:1; /* Need to flush IOTLB cache or write + * buffer when creating mappings. + */ spinlock_t lock; /* Protect device tracking lists */ struct list_head devices; /* all devices' list */ @@ -1252,10 +1255,9 @@ domain_add_dev_pasid(struct iommu_domain *domain, void domain_remove_dev_pasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); -int __domain_setup_first_level(struct intel_iommu *iommu, - struct device *dev, ioasid_t pasid, - u16 did, pgd_t *pgd, int flags, - struct iommu_domain *old); +int __domain_setup_first_level(struct intel_iommu *iommu, struct device *dev, + ioasid_t pasid, u16 did, phys_addr_t fsptptr, + int flags, struct iommu_domain *old); int dmar_ir_support(void); @@ -1378,6 +1380,18 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); extern const struct iommu_ops intel_iommu_ops; +extern const struct iommu_domain_ops intel_fs_paging_domain_ops; +extern const struct iommu_domain_ops intel_ss_paging_domain_ops; + +static inline bool intel_domain_is_fs_paging(struct dmar_domain *domain) +{ + return domain->domain.ops == &intel_fs_paging_domain_ops; +} + +static inline bool intel_domain_is_ss_paging(struct dmar_domain *domain) +{ + return domain->domain.ops == &intel_ss_paging_domain_ops; +} #ifdef CONFIG_INTEL_IOMMU extern int intel_iommu_sm; diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index cf7b6882ec75..2ef71ba696b1 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -10,6 +10,7 @@ #include <linux/hpet.h> #include <linux/pci.h> #include <linux/irq.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/acpi.h> #include <linux/irqdomain.h> #include <linux/crash_dump.h> @@ -518,8 +519,14 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu) static int intel_setup_irq_remapping(struct intel_iommu *iommu) { + struct irq_domain_info info = { + .ops = &intel_ir_domain_ops, + .parent = arch_get_ir_parent_domain(), + .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI, + .size = INTR_REMAP_TABLE_ENTRIES, + .host_data = iommu, + }; struct ir_table *ir_table; - struct fwnode_handle *fn; unsigned long *bitmap; void *ir_table_base; @@ -544,25 +551,16 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) goto out_free_pages; } - fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); - if (!fn) + info.fwnode = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id); + if (!info.fwnode) goto out_free_bitmap; - iommu->ir_domain = - irq_domain_create_hierarchy(arch_get_ir_parent_domain(), - 0, INTR_REMAP_TABLE_ENTRIES, - fn, &intel_ir_domain_ops, - iommu); + iommu->ir_domain = msi_create_parent_irq_domain(&info, &dmar_msi_parent_ops); if (!iommu->ir_domain) { pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); goto out_free_fwnode; } - irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR); - iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT | - IRQ_DOMAIN_FLAG_ISOLATED_MSI; - iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops; - ir_table->base = ir_table_base; ir_table->bitmap = bitmap; iommu->ir_table = ir_table; @@ -608,7 +606,7 @@ out_free_ir_domain: irq_domain_remove(iommu->ir_domain); iommu->ir_domain = NULL; out_free_fwnode: - irq_domain_free_fwnode(fn); + 
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index cf7b6882ec75..2ef71ba696b1 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -10,6 +10,7 @@
 #include <linux/hpet.h>
 #include <linux/pci.h>
 #include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
 #include <linux/acpi.h>
 #include <linux/irqdomain.h>
 #include <linux/crash_dump.h>
@@ -518,8 +519,14 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
 
 static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 {
+	struct irq_domain_info info = {
+		.ops		= &intel_ir_domain_ops,
+		.parent		= arch_get_ir_parent_domain(),
+		.domain_flags	= IRQ_DOMAIN_FLAG_ISOLATED_MSI,
+		.size		= INTR_REMAP_TABLE_ENTRIES,
+		.host_data	= iommu,
+	};
 	struct ir_table *ir_table;
-	struct fwnode_handle *fn;
 	unsigned long *bitmap;
 	void *ir_table_base;
 
@@ -544,25 +551,16 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 		goto out_free_pages;
 	}
 
-	fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
-	if (!fn)
+	info.fwnode = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
+	if (!info.fwnode)
 		goto out_free_bitmap;
 
-	iommu->ir_domain =
-		irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
-					    0, INTR_REMAP_TABLE_ENTRIES,
-					    fn, &intel_ir_domain_ops,
-					    iommu);
+	iommu->ir_domain = msi_create_parent_irq_domain(&info, &dmar_msi_parent_ops);
 	if (!iommu->ir_domain) {
 		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
 		goto out_free_fwnode;
 	}
 
-	irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
-	iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
-				   IRQ_DOMAIN_FLAG_ISOLATED_MSI;
-	iommu->ir_domain->msi_parent_ops = &dmar_msi_parent_ops;
-
 	ir_table->base = ir_table_base;
 	ir_table->bitmap = bitmap;
 	iommu->ir_table = ir_table;
@@ -608,7 +606,7 @@ out_free_ir_domain:
 	irq_domain_remove(iommu->ir_domain);
 	iommu->ir_domain = NULL;
 out_free_fwnode:
-	irq_domain_free_fwnode(fn);
+	irq_domain_free_fwnode(info.fwnode);
 out_free_bitmap:
 	bitmap_free(bitmap);
 out_free_pages:
@@ -1530,6 +1528,8 @@ static const struct irq_domain_ops intel_ir_domain_ops = {
 static const struct msi_parent_ops dmar_msi_parent_ops = {
 	.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED |
 			   MSI_FLAG_MULTI_PCI_MSI,
+	.bus_select_token = DOMAIN_BUS_DMAR,
+	.bus_select_mask = MATCH_PCI_MSI,
 	.prefix = "IR-",
 	.init_dev_msi_info = msi_parent_init_dev_msi_info,
 };
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index fc312f649f9e..1b6ad9c900a5 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -216,8 +216,7 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
 	/* Must be nested domain */
 	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
 		return ERR_PTR(-EOPNOTSUPP);
-	if (parent->ops != intel_iommu_ops.default_domain_ops ||
-	    !s2_domain->nested_parent)
+	if (!intel_domain_is_ss_paging(s2_domain) || !s2_domain->nested_parent)
 		return ERR_PTR(-EINVAL);
 
 	ret = iommu_copy_struct_from_user(&vtd, user_data,
@@ -229,7 +228,6 @@ intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	domain->use_first_level = true;
 	domain->s2_domain = s2_domain;
 	domain->s1_cfg = vtd;
 	domain->domain.ops = &intel_nested_domain_ops;
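intel_setup_irq_remapping() now fills a single irq_domain_info and hands it, together with the msi_parent_ops, to msi_create_parent_irq_domain(), replacing a long positional irq_domain_create_hierarchy() call plus three post-creation fixups. A minimal model of that parameter-struct refactoring (all names here are illustrative, not the genirq API):

    #include <stdio.h>

    struct domain_info {
            const char *name;    /* stands in for .fwnode    */
            unsigned int size;   /* stands in for .size      */
            void *host_data;     /* stands in for .host_data */
    };

    static int create_domain(const struct domain_info *info)
    {
            if (!info->name || !info->size)
                    return -1;
            printf("domain %s, %u entries\n", info->name, info->size);
            return 0;
    }

    int main(void)
    {
            /* Designated initializers keep the call site readable, and a
             * newly added field defaults to zero without touching every
             * caller. */
            struct domain_info info = {
                    .name = "INTEL-IR",
                    .size = 65536,
            };

            return create_domain(&info);
    }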
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index ac67a056b6c8..52f678975da7 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -348,14 +348,15 @@ static void intel_pasid_flush_present(struct intel_iommu *iommu,
  */
 static void pasid_pte_config_first_level(struct intel_iommu *iommu,
 					 struct pasid_entry *pte,
-					 pgd_t *pgd, u16 did, int flags)
+					 phys_addr_t fsptptr, u16 did,
+					 int flags)
 {
 	lockdep_assert_held(&iommu->lock);
 
 	pasid_clear_entry(pte);
 
 	/* Setup the first level page table pointer: */
-	pasid_set_flptr(pte, (u64)__pa(pgd));
+	pasid_set_flptr(pte, fsptptr);
 
 	if (flags & PASID_FLAG_FL5LP)
 		pasid_set_flpm(pte, 1);
@@ -372,9 +373,9 @@ static void pasid_pte_config_first_level(struct intel_iommu *iommu,
 	pasid_set_present(pte);
 }
 
-int intel_pasid_setup_first_level(struct intel_iommu *iommu,
-				  struct device *dev, pgd_t *pgd,
-				  u32 pasid, u16 did, int flags)
+int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+				  phys_addr_t fsptptr, u32 pasid, u16 did,
+				  int flags)
 {
 	struct pasid_entry *pte;
 
@@ -402,7 +403,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 		return -EBUSY;
 	}
 
-	pasid_pte_config_first_level(iommu, pte, pgd, did, flags);
+	pasid_pte_config_first_level(iommu, pte, fsptptr, did, flags);
 
 	spin_unlock(&iommu->lock);
 
@@ -412,7 +413,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 }
 
 int intel_pasid_replace_first_level(struct intel_iommu *iommu,
-				    struct device *dev, pgd_t *pgd,
+				    struct device *dev, phys_addr_t fsptptr,
 				    u32 pasid, u16 did, u16 old_did,
 				    int flags)
 {
@@ -430,7 +431,7 @@ int intel_pasid_replace_first_level(struct intel_iommu *iommu,
 		return -EINVAL;
 	}
 
-	pasid_pte_config_first_level(iommu, &new_pte, pgd, did, flags);
+	pasid_pte_config_first_level(iommu, &new_pte, fsptptr, did, flags);
 
 	spin_lock(&iommu->lock);
 	pte = intel_pasid_get_entry(dev, pasid);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index fd0fd1a0df84..a771a77d4239 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -288,9 +288,9 @@ extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
 struct pasid_table *intel_pasid_get_table(struct device *dev);
-int intel_pasid_setup_first_level(struct intel_iommu *iommu,
-				  struct device *dev, pgd_t *pgd,
-				  u32 pasid, u16 did, int flags);
+int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev,
+				  phys_addr_t fsptptr, u32 pasid, u16 did,
+				  int flags);
 int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 				   struct dmar_domain *domain,
 				   struct device *dev, u32 pasid);
@@ -302,9 +302,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
 			     u32 pasid, struct dmar_domain *domain);
 int intel_pasid_replace_first_level(struct intel_iommu *iommu,
-				    struct device *dev, pgd_t *pgd,
-				    u32 pasid, u16 did, u16 old_did,
-				    int flags);
+				    struct device *dev, phys_addr_t fsptptr,
+				    u32 pasid, u16 did, u16 old_did, int flags);
 int intel_pasid_replace_second_level(struct intel_iommu *iommu,
 				     struct dmar_domain *domain,
 				     struct device *dev, u16 old_did,
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index f3da596410b5..e147f71f91b7 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -171,7 +171,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
 	/* Setup the pasid table: */
 	sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
 	ret = __domain_setup_first_level(iommu, dev, pasid,
-					 FLPT_DEFAULT_DID, mm->pgd,
+					 FLPT_DEFAULT_DID, __pa(mm->pgd),
 					 sflags, old);
 	if (ret)
 		goto out_unwind_iopf;
@@ -214,7 +214,6 @@ struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
 		return ERR_PTR(-ENOMEM);
 
 	domain->domain.ops = &intel_svm_domain_ops;
-	domain->use_first_level = true;
 	INIT_LIST_HEAD(&domain->dev_pasids);
 	INIT_LIST_HEAD(&domain->cache_tags);
 	spin_lock_init(&domain->cache_lock);
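pasid.c now takes the first-stage page-table pointer as a physical address (fsptptr) instead of a pgd_t *, so the one caller that starts from a virtual pointer (the svm.c hunk) performs the __pa() conversion at the boundary and the PASID code never dereferences the table. A toy model of moving the conversion to the interface edge (illustrative C; toy_pa() is a fake stand-in for __pa()):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    /* Fake virtual-to-physical translation: a constant linear offset. */
    static phys_addr_t toy_pa(const void *va)
    {
            return (phys_addr_t)(uintptr_t)va - 0x1000;
    }

    /* The callee receives a physical address and never touches the memory. */
    static void set_flptr(phys_addr_t fsptptr)
    {
            printf("FLPTR <- %#llx\n", (unsigned long long)fsptptr);
    }

    int main(void)
    {
            static uint64_t pgd[512];

            set_flptr(toy_pa(pgd)); /* the caller converts exactly once */
            return 0;
    }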
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 9defdae6ebae..6311ba3f1691 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
 	TP_ARGS(tag)
 );
 
-DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
-	TP_PROTO(struct cache_tag *tag),
-	TP_ARGS(tag)
-);
-
 DECLARE_EVENT_CLASS(cache_tag_flush,
 	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
 		 unsigned long addr, unsigned long pages, unsigned long mask),
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 96425e92f313..7e8e2216c294 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -85,11 +85,6 @@
 #define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
 #define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)
 
-#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
-/* Ignore the contiguous bit for block splitting */
-#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
-#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
-					 ARM_LPAE_PTE_ATTR_HI_MASK)
 /* Software bit for solving coherency races */
 #define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)
 
@@ -155,8 +150,6 @@
 #define iopte_type(pte) \
 	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
 
-#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)
-
 #define iopte_writeable_dirty(pte) \
 	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
 
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index a4b606c591da..060ebe330ee1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -2002,13 +2002,6 @@ static void iommu_domain_init(struct iommu_domain *domain, unsigned int type,
 	domain->owner = ops;
 	if (!domain->ops)
 		domain->ops = ops->default_domain_ops;
-
-	/*
-	 * If not already set, assume all sizes by default; the driver
-	 * may override this later
-	 */
-	if (!domain->pgsize_bitmap)
-		domain->pgsize_bitmap = ops->pgsize_bitmap;
 }
 
 static struct iommu_domain *
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 6bd0abf9a641..c52bf037a2f0 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -801,7 +801,6 @@ static const struct iommu_ops mock_ops = {
 	.default_domain = &mock_blocking_domain,
 	.blocked_domain = &mock_blocking_domain,
 	.owner = THIS_MODULE,
-	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
 	.hw_info = mock_domain_hw_info,
 	.domain_alloc_paging_flags = mock_domain_alloc_paging_flags,
 	.domain_alloc_nested = mock_domain_alloc_nested,
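The iommu.c hunk removes the core-code fallback that copied ops->pgsize_bitmap into the domain, completing the move of the supported-page-size declaration from the (shared, sometimes writable) ops structure to the domain itself; the per-driver hunks that follow all set domain->pgsize_bitmap in their alloc_paging paths instead. A compact model of that ownership change (illustrative C):

    #include <stdio.h>

    #define SZ_4K 0x1000UL
    #define SZ_2M 0x200000UL

    struct domain { unsigned long pgsize_bitmap; };

    static struct domain *alloc_paging_domain(void)
    {
            static struct domain d;

            /* The driver, not core code, declares what this domain supports. */
            d.pgsize_bitmap = SZ_4K | SZ_2M;
            return &d;
    }

    int main(void)
    {
            struct domain *d = alloc_paging_domain();

            printf("supported page sizes: %#lx\n", d->pgsize_bitmap);
            return 0;
    }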
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 90341b24a811..ffa892f65714 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -430,7 +430,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 	 * non-secure mode.
 	 */
 	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
-	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
+	domain->cfg.pgsize_bitmap = domain->io_domain.pgsize_bitmap;
 	domain->cfg.ias = 32;
 	domain->cfg.oas = 40;
 	domain->cfg.tlb = &ipmmu_flush_ops;
@@ -571,6 +571,7 @@ static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
 		return NULL;
 
 	mutex_init(&domain->mutex);
+	domain->io_domain.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
 
 	return &domain->io_domain;
 }
@@ -882,7 +883,6 @@ static const struct iommu_ops ipmmu_ops = {
 	 */
 	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA) ?
			generic_device_group : generic_single_device_group,
-	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 	.of_xlate = ipmmu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = ipmmu_attach_device,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 2769e4544038..43a61ba021a5 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -312,6 +312,8 @@ static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
 
 	INIT_LIST_HEAD(&priv->list_attached);
 
+	priv->domain.pgsize_bitmap = MSM_IOMMU_PGSIZES;
+
 	priv->domain.geometry.aperture_start = 0;
 	priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
 	priv->domain.geometry.force_aperture = true;
@@ -339,7 +341,7 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
 	spin_lock_init(&priv->pgtlock);
 
 	priv->cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap = msm_iommu_ops.pgsize_bitmap,
+		.pgsize_bitmap = priv->domain.pgsize_bitmap,
 		.ias = 32,
 		.oas = 32,
 		.tlb = &msm_iommu_flush_ops,
@@ -352,8 +354,6 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
 		return -EINVAL;
 	}
 
-	msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
-
 	return 0;
 }
 
@@ -692,7 +692,6 @@ static struct iommu_ops msm_iommu_ops = {
 	.domain_alloc_paging = msm_iommu_domain_alloc_paging,
 	.probe_device = msm_iommu_probe_device,
 	.device_group = generic_device_group,
-	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = msm_iommu_attach_dev,
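In the msm hunks the io-pgtable configuration is seeded from the domain's own bitmap and the old write-back into the global msm_iommu_ops is dropped, so one domain's page-size negotiation can no longer affect another's. A sketch of the per-instance flow (illustrative C; the trimming formatter is invented):

    #include <stdio.h>

    struct cfg { unsigned long pgsize_bitmap; };
    struct priv { unsigned long domain_pgsizes; struct cfg cfg; };

    /* An io-pgtable formatter may trim sizes it cannot honour. */
    static int pgtable_alloc(struct cfg *cfg)
    {
            cfg->pgsize_bitmap &= ~0x200000UL;
            return 0;
    }

    int main(void)
    {
            struct priv p = { .domain_pgsizes = 0x201000UL };

            p.cfg.pgsize_bitmap = p.domain_pgsizes; /* seed from the domain */
            pgtable_alloc(&p.cfg);                  /* no write-back to shared ops */
            printf("cfg now %#lx\n", p.cfg.pgsize_bitmap);
            return 0;
    }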
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index cb95fecf6016..0e0285348d2b 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -648,7 +648,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 	if (share_dom) {
 		dom->iop = share_dom->iop;
 		dom->cfg = share_dom->cfg;
-		dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
+		dom->domain.pgsize_bitmap = share_dom->domain.pgsize_bitmap;
 		goto update_iova_region;
 	}
 
@@ -656,7 +656,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
 			IO_PGTABLE_QUIRK_NO_PERMS |
 			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
-		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
+		.pgsize_bitmap = dom->domain.pgsize_bitmap,
 		.ias = MTK_IOMMU_HAS_FLAG(data->plat_data, IOVA_34_EN) ? 34 : 32,
 		.iommu_dev = data->dev,
 	};
@@ -675,9 +675,6 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
 		return -ENOMEM;
 	}
 
-	/* Update our support page sizes bitmap */
-	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
-
 	data->share_dom = dom;
 
 update_iova_region:
@@ -697,6 +694,7 @@ static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
 	if (!dom)
 		return NULL;
 	mutex_init(&dom->mutex);
+	dom->domain.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M;
 
 	return &dom->domain;
 }
@@ -1019,7 +1017,6 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.device_group	= mtk_iommu_device_group,
 	.of_xlate	= mtk_iommu_of_xlate,
 	.get_resv_regions = mtk_iommu_get_resv_regions,
-	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 	.owner		= THIS_MODULE,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= mtk_iommu_attach_device,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 66824982e05f..10cc0b1197e8 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -288,6 +288,8 @@ static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
 	if (!dom)
 		return NULL;
 
+	dom->domain.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE;
+
 	return &dom->domain;
 }
 
@@ -509,14 +511,10 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
 
 static void mtk_iommu_v1_probe_finalize(struct device *dev)
 {
-	struct dma_iommu_mapping *mtk_mapping;
-	struct mtk_iommu_v1_data *data;
+	__maybe_unused struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
 	int err;
 
-	data = dev_iommu_priv_get(dev);
-	mtk_mapping = data->mapping;
-
-	err = arm_iommu_attach_device(dev, mtk_mapping);
+	err = arm_iommu_attach_device(dev, data->mapping);
 	if (err)
 		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
 }
@@ -582,7 +580,6 @@ static const struct iommu_ops mtk_iommu_v1_ops = {
 	.probe_finalize = mtk_iommu_v1_probe_finalize,
 	.release_device	= mtk_iommu_v1_release_device,
 	.device_group	= generic_device_group,
-	.pgsize_bitmap	= MT2701_IOMMU_PAGE_SIZE,
 	.owner          = THIS_MODULE,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= mtk_iommu_v1_attach_device,
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 3c62337f43c6..6fb93927bdb9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1123,29 +1123,15 @@ static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
 					      struct omap_iommu *obj)
 {
 	struct device_node *np = pdev->dev.of_node;
-	int ret;
 
 	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
 		return 0;
 
-	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
-		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
-		return -EINVAL;
-	}
-
-	obj->syscfg =
-		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
-	if (IS_ERR(obj->syscfg)) {
-		/* can fail with -EPROBE_DEFER */
-		ret = PTR_ERR(obj->syscfg);
-		return ret;
-	}
-
-	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
-				       &obj->id)) {
-		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
-		return -EINVAL;
-	}
+	obj->syscfg = syscon_regmap_lookup_by_phandle_args(np, "ti,syscon-mmuconfig",
+							   1, &obj->id);
+	if (IS_ERR(obj->syscfg))
+		return dev_err_probe(&pdev->dev, PTR_ERR(obj->syscfg),
+				     "ti,syscon-mmuconfig property is missing\n");
 
 	if (obj->id != 0 && obj->id != 1) {
 		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
@@ -1584,6 +1570,8 @@ static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
 
 	spin_lock_init(&omap_domain->lock);
 
+	omap_domain->domain.pgsize_bitmap = OMAP_IOMMU_PGSIZES;
+
 	omap_domain->domain.geometry.aperture_start = 0;
 	omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
 	omap_domain->domain.geometry.force_aperture = true;
@@ -1735,7 +1723,6 @@ static const struct iommu_ops omap_iommu_ops = {
 	.release_device	= omap_iommu_release_device,
 	.device_group	= generic_single_device_group,
 	.of_xlate	= omap_iommu_of_xlate,
-	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= omap_iommu_attach_dev,
 		.map_pages	= omap_iommu_map,
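The omap hunk folds the property check, the regmap lookup and the index read into one syscon_regmap_lookup_by_phandle_args() call, and reports failure through dev_err_probe(), which records the reason but stays quiet for -EPROBE_DEFER. A toy model of that error-reporting contract (illustrative; toy_err_probe() only mimics the real helper):

    #include <stdio.h>

    #define EPROBE_DEFER 517

    /* Log unless the error is a deferral, then pass the code straight
     * through so callers can simply 'return' it. */
    static int toy_err_probe(int err, const char *msg)
    {
            if (err != -EPROBE_DEFER)
                    fprintf(stderr, "error %d: %s\n", err, msg);
            return err;
    }

    static int lookup_syscon(int simulated_err)
    {
            if (simulated_err)
                    return toy_err_probe(simulated_err,
                                         "ti,syscon-mmuconfig property is missing");
            return 0;
    }

    int main(void)
    {
            return lookup_syscon(-EPROBE_DEFER) == -EPROBE_DEFER ? 0 : 1;
    }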
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index bb57092ca901..2d0d31ba2886 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -1533,7 +1533,6 @@ static void riscv_iommu_release_device(struct device *dev)
 }
 
 static const struct iommu_ops riscv_iommu_ops = {
-	.pgsize_bitmap = SZ_4K,
 	.of_xlate = riscv_iommu_of_xlate,
 	.identity_domain = &riscv_iommu_identity_domain,
 	.blocked_domain = &riscv_iommu_blocking_domain,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index e6bb3c784017..0861dd469bd8 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1081,6 +1081,8 @@ static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 	spin_lock_init(&rk_domain->dt_lock);
 	INIT_LIST_HEAD(&rk_domain->iommus);
 
+	rk_domain->domain.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP;
+
 	rk_domain->domain.geometry.aperture_start = 0;
 	rk_domain->domain.geometry.aperture_end   = DMA_BIT_MASK(32);
 	rk_domain->domain.geometry.force_aperture = true;
@@ -1170,7 +1172,6 @@ static const struct iommu_ops rk_iommu_ops = {
 	.probe_device = rk_iommu_probe_device,
 	.release_device = rk_iommu_release_device,
 	.device_group = generic_single_device_group,
-	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 	.of_xlate = rk_iommu_of_xlate,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = rk_iommu_attach_device,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 433b59f43530..9c80d61deb2c 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -557,6 +557,7 @@ static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
 	}
 	zdev->end_dma = zdev->start_dma + aperture_size - 1;
 
+	s390_domain->domain.pgsize_bitmap = SZ_4K;
 	s390_domain->domain.geometry.force_aperture = true;
 	s390_domain->domain.geometry.aperture_start = 0;
 	s390_domain->domain.geometry.aperture_end = max_tbl_size(s390_domain);
@@ -1158,7 +1159,6 @@ static struct iommu_domain blocking_domain = {
 	.domain_alloc_paging = s390_domain_alloc_paging,		\
 	.probe_device = s390_iommu_probe_device,			\
 	.device_group = generic_device_group,				\
-	.pgsize_bitmap = SZ_4K,						\
 	.get_resv_regions = s390_iommu_get_resv_regions,		\
 	.default_domain_ops = &(const struct iommu_domain_ops) {	\
 		.attach_dev = s390_iommu_attach_device,			\
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 941d1f361c8c..c7ca1d8a0b15 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -143,6 +143,8 @@ static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev)
 
 	spin_lock_init(&dom->pgtlock);
 
+	dom->domain.pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE;
+
 	dom->domain.geometry.aperture_start = 0;
 	dom->domain.geometry.aperture_end = SZ_256M - 1;
 	dom->domain.geometry.force_aperture = true;
@@ -410,7 +412,6 @@ static const struct iommu_ops sprd_iommu_ops = {
 	.probe_device = sprd_iommu_probe_device,
 	.device_group = generic_single_device_group,
 	.of_xlate = sprd_iommu_of_xlate,
-	.pgsize_bitmap = SPRD_IOMMU_PAGE_SIZE,
 	.owner = THIS_MODULE,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = sprd_iommu_attach_device,
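The riscv, rockchip, s390 and sprd hunks follow the same recipe: the allocation path now initialises the page-size bitmap right next to the aperture geometry it already set up. A minimal model of that alloc-time initialisation (illustrative types):

    #include <stdio.h>

    struct geometry { unsigned long long start, end; int force_aperture; };
    struct domain { unsigned long pgsize_bitmap; struct geometry geo; };

    static void domain_init(struct domain *d)
    {
            d->pgsize_bitmap = 0x1000UL;  /* SZ_4K */
            d->geo.start = 0;
            d->geo.end = 0xffffffffULL;   /* 32-bit IOVA aperture */
            d->geo.force_aperture = 1;
    }

    int main(void)
    {
            struct domain d;

            domain_init(&d);
            printf("aperture [%#llx, %#llx], pgsizes %#lx\n",
                   d.geo.start, d.geo.end, d.pgsize_bitmap);
            return 0;
    }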
	.device_group = generic_single_device_group,
	.of_xlate = sprd_iommu_of_xlate,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 76c9620af4bb..de10b569d9a9 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -697,6 +697,8 @@ sun50i_iommu_domain_alloc_paging(struct device *dev)
 
 	refcount_set(&sun50i_domain->refcnt, 1);
 
+	sun50i_domain->domain.pgsize_bitmap = SZ_4K;
+
 	sun50i_domain->domain.geometry.aperture_start = 0;
 	sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
 	sun50i_domain->domain.geometry.force_aperture = true;
@@ -842,7 +844,6 @@ static int sun50i_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops sun50i_iommu_ops = {
 	.identity_domain = &sun50i_iommu_identity_domain,
-	.pgsize_bitmap = SZ_4K,
 	.device_group = generic_single_device_group,
 	.domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
 	.of_xlate = sun50i_iommu_of_xlate,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e58fe9d8b9e7..36cdd5fbab07 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -318,6 +318,8 @@ static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 
 	spin_lock_init(&as->lock);
 
+	as->domain.pgsize_bitmap = SZ_4K;
+
 	/* setup aperture */
 	as->domain.geometry.aperture_start = 0;
 	as->domain.geometry.aperture_end = 0xffffffff;
@@ -1002,7 +1004,6 @@ static const struct iommu_ops tegra_smmu_ops = {
 	.probe_device = tegra_smmu_probe_device,
 	.device_group = tegra_smmu_device_group,
 	.of_xlate = tegra_smmu_of_xlate,
-	.pgsize_bitmap = SZ_4K,
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev = tegra_smmu_attach_dev,
 		.map_pages = tegra_smmu_map,
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index ecd41fb03e5a..532db1de201b 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -998,7 +998,7 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 	iommu_dma_get_resv_regions(dev, head);
 }
 
-static struct iommu_ops viommu_ops;
+static const struct iommu_ops viommu_ops;
 static struct virtio_driver virtio_iommu_drv;
 
 static int viommu_match_node(struct device *dev, const void *data)
@@ -1086,7 +1086,7 @@ static bool viommu_capable(struct device *dev, enum iommu_cap cap)
 	}
 }
 
-static struct iommu_ops viommu_ops = {
+static const struct iommu_ops viommu_ops = {
 	.capable = viommu_capable,
 	.domain_alloc_identity = viommu_domain_alloc_identity,
 	.domain_alloc_paging = viommu_domain_alloc_paging,
@@ -1217,8 +1217,6 @@ static int viommu_probe(struct virtio_device *vdev)
 		viommu->first_domain++;
 	}
 
-	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
-
 	virtio_device_ready(vdev);
 
 	/* Populate the event queue with buffers */
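With the probe-time assignment to viommu_ops.pgsize_bitmap gone, nothing writes the ops table at runtime any more, which is what lets the final hunks mark it const and move it into read-only data. A toy illustration of the same const-ification (names invented):

    struct ops { int (*probe)(void); };

    static int probe(void) { return 0; }

    /* Previously non-const only so probe() could patch a field in;
     * once the last runtime writer is gone, the table can be const. */
    static const struct ops viommu_like_ops = {
            .probe = probe,
    };

    int main(void)
    {
            return viommu_like_ops.probe();
    }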