Diffstat (limited to 'drivers/iommu/amd')
-rw-r--r--  drivers/iommu/amd/Kconfig            |   1
-rw-r--r--  drivers/iommu/amd/amd_iommu.h        |   6
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h  |  17
-rw-r--r--  drivers/iommu/amd/debugfs.c          | 378
-rw-r--r--  drivers/iommu/amd/init.c             |  52
-rw-r--r--  drivers/iommu/amd/io_pgtable.c       |   4
-rw-r--r--  drivers/iommu/amd/iommu.c            | 192
7 files changed, 559 insertions(+), 91 deletions(-)
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 994063e5586f..ecef69c11144 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -7,6 +7,7 @@ config AMD_IOMMU
select PCI_ATS
select PCI_PRI
select PCI_PASID
+ select IRQ_MSI_LIB
select MMU_NOTIFIER
select IOMMU_API
select IOMMU_IOVA
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 29a8864381c3..9b4b589a54b5 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -28,9 +28,9 @@ void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
gfp_t gfp, size_t size);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu);
+void amd_iommu_debugfs_setup(void);
#else
-static inline void amd_iommu_debugfs_setup(struct amd_iommu *iommu) {}
+static inline void amd_iommu_debugfs_setup(void) {}
#endif
/* Needed for interrupt remapping */
@@ -42,7 +42,9 @@ int amd_iommu_enable_faulting(unsigned int cpu);
extern int amd_iommu_guest_ir;
extern enum protection_domain_mode amd_iommu_pgtable;
extern int amd_iommu_gpt_level;
+extern u8 amd_iommu_hpt_level;
extern unsigned long amd_iommu_pgsize_bitmap;
+extern bool amd_iommu_hatdis;
/* Protection domain ops */
void amd_iommu_init_identity_domain(void);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ccbab3a4811a..5219d7ddfdaa 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -94,6 +94,7 @@
#define FEATURE_GA BIT_ULL(7)
#define FEATURE_HE BIT_ULL(8)
#define FEATURE_PC BIT_ULL(9)
+#define FEATURE_HATS GENMASK_ULL(11, 10)
#define FEATURE_GATS GENMASK_ULL(13, 12)
#define FEATURE_GLX GENMASK_ULL(15, 14)
#define FEATURE_GAM_VAPIC BIT_ULL(21)
@@ -460,6 +461,9 @@
/* IOMMU Feature Reporting Field (for IVHD type 10h) */
#define IOMMU_FEAT_GASUP_SHIFT 6
+/* IOMMU HATDIS for IVHD type 11h and 40h */
+#define IOMMU_IVHD_ATTR_HATDIS_SHIFT 0
+
/* IOMMU Extended Feature Register (EFR) */
#define IOMMU_EFR_XTSUP_SHIFT 2
#define IOMMU_EFR_GASUP_SHIFT 7
@@ -558,7 +562,8 @@ struct amd_io_pgtable {
};
enum protection_domain_mode {
- PD_MODE_V1 = 1,
+ PD_MODE_NONE,
+ PD_MODE_V1,
PD_MODE_V2,
};
@@ -790,6 +795,8 @@ struct amd_iommu {
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
struct dentry *debugfs;
+ int dbg_mmio_offset;
+ int dbg_cap_offset;
#endif
/* IOPF support */
@@ -891,6 +898,13 @@ struct dev_table_entry {
};
/*
+ * Structure defining one entry in the command buffer
+ */
+struct iommu_cmd {
+ u32 data[4];
+};
+
+/*
 * Structure to store persistent DTE flags from IVHD
*/
struct ivhd_dte_flags {
@@ -1054,7 +1068,6 @@ struct irq_2_irte {
};
struct amd_ir_data {
- u32 cached_ga_tag;
struct amd_iommu *iommu;
struct irq_2_irte irq_2_irte;
struct msi_msg msi_entry;
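
Moving struct iommu_cmd into amd_iommu_types.h is what lets the new debugfs code walk the command ring directly. A minimal sketch (not part of the patch) of how the 16-byte entries relate to the byte-granular head/tail registers that iommu_cmdbuf_show() dumps, assuming CMD_BUFFER_ENTRIES (512, a power of two) from this header:

static inline u32 cmdbuf_pending(u32 head, u32 tail)
{
	u32 head_idx = (head >> 4) & 0x7fff;	/* byte offset -> 16-byte entry index */
	u32 tail_idx = (tail >> 4) & 0x7fff;

	/* Ring arithmetic over a power-of-two sized buffer. */
	return (tail_idx - head_idx) & (CMD_BUFFER_ENTRIES - 1);
}
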
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
index 545372fcc72f..10fa217a7119 100644
--- a/drivers/iommu/amd/debugfs.c
+++ b/drivers/iommu/amd/debugfs.c
@@ -11,22 +11,382 @@
#include <linux/pci.h>
#include "amd_iommu.h"
+#include "../irq_remapping.h"
static struct dentry *amd_iommu_debugfs;
-static DEFINE_MUTEX(amd_iommu_debugfs_lock);
#define MAX_NAME_LEN 20
+#define OFS_IN_SZ 8
+#define DEVID_IN_SZ 16
-void amd_iommu_debugfs_setup(struct amd_iommu *iommu)
+static int sbdf = -1;
+
+static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct amd_iommu *iommu = m->private;
+ int ret;
+
+ iommu->dbg_mmio_offset = -1;
+
+ if (cnt > OFS_IN_SZ)
+ return -EINVAL;
+
+ ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset);
+ if (ret)
+ return ret;
+
+ if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - 4) {
+ iommu->dbg_mmio_offset = -1;
+ return -EINVAL;
+ }
+
+ return cnt;
+}
+
+static int iommu_mmio_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ u64 value;
+
+ if (iommu->dbg_mmio_offset < 0) {
+ seq_puts(m, "Please provide mmio register's offset\n");
+ return 0;
+ }
+
+ value = readq(iommu->mmio_base + iommu->dbg_mmio_offset);
+ seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value);
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);
+
+static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct amd_iommu *iommu = m->private;
+ int ret;
+
+ iommu->dbg_cap_offset = -1;
+
+ if (cnt > OFS_IN_SZ)
+ return -EINVAL;
+
+ ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_cap_offset);
+ if (ret)
+ return ret;
+
+ /* Capability register at offset 0x14 is the last IOMMU capability register. */
+ if (iommu->dbg_cap_offset > 0x14) {
+ iommu->dbg_cap_offset = -1;
+ return -EINVAL;
+ }
+
+ return cnt;
+}
+
+static int iommu_capability_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ u32 value;
+ int err;
+
+ if (iommu->dbg_cap_offset < 0) {
+ seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
+ return 0;
+ }
+
+ err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + iommu->dbg_cap_offset, &value);
+ if (err) {
+ seq_printf(m, "Not able to read capability register at 0x%x\n",
+ iommu->dbg_cap_offset);
+ return 0;
+ }
+
+ seq_printf(m, "Offset:0x%x Value:0x%08x\n", iommu->dbg_cap_offset, value);
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);
+
+static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu *iommu = m->private;
+ struct iommu_cmd *cmd;
+ unsigned long flag;
+ u32 head, tail;
+ int i;
+
+ raw_spin_lock_irqsave(&iommu->lock, flag);
+ head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+ seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
+ (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
+ for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
+ cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
+ seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
+ cmd->data[1], cmd->data[2], cmd->data[3]);
+ }
+ raw_spin_unlock_irqrestore(&iommu->lock, flag);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);
+
+static ssize_t devid_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ int seg, bus, slot, func;
+ struct amd_iommu *iommu;
+ char *srcid_ptr;
+ u16 devid;
+ int i;
+
+ sbdf = -1;
+
+ if (cnt >= DEVID_IN_SZ)
+ return -EINVAL;
+
+ srcid_ptr = memdup_user_nul(ubuf, cnt);
+ if (IS_ERR(srcid_ptr))
+ return PTR_ERR(srcid_ptr);
+
+ i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
+ if (i != 4) {
+ i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
+ if (i != 3) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+ seg = 0;
+ }
+
+ devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));
+
+ /* Check if user device id input is a valid input */
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ if (devid > pci_seg->last_bdf) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu) {
+ kfree(srcid_ptr);
+ return -ENODEV;
+ }
+ break;
+ }
+
+ if (pci_seg->id != seg) {
+ kfree(srcid_ptr);
+ return -EINVAL;
+ }
+
+ sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);
+
+ kfree(srcid_ptr);
+
+ return cnt;
+}
+
+static int devid_show(struct seq_file *m, void *unused)
{
+ u16 devid;
+
+ if (sbdf >= 0) {
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+ seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf),
+ PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+ } else
+ seq_puts(m, "No or Invalid input provided\n");
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(devid);
+
+static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
+{
+ struct dev_table_entry *dev_table;
+ struct amd_iommu *iommu;
+
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu)
+ return;
+
+ dev_table = get_dev_table(iommu);
+ if (!dev_table) {
+ seq_puts(m, "Device table not found");
+ return;
+ }
+
+ seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
+ "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
+ seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
+ PCI_SLOT(devid), PCI_FUNC(devid));
+ for (int i = 3; i >= 0; --i)
+ seq_printf(m, "%016llx ", dev_table[devid].data[i]);
+ seq_printf(m, "iommu%d\n", iommu->index);
+}
+
+static int iommu_devtbl_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ u16 seg, devid;
+
+ if (sbdf < 0) {
+ seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+ return 0;
+ }
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ dump_dte(m, pci_seg, devid);
+ break;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);
+
+static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+ struct irte_ga *ptr, *irte;
+ int index;
+
+ for (index = 0; index < int_tab_len; index++) {
+ ptr = (struct irte_ga *)table->table;
+ irte = &ptr[index];
+
+ if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
+ !irte->lo.fields_vapic.valid)
+ continue;
+ else if (!irte->lo.fields_remap.valid)
+ continue;
+ seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val);
+ }
+}
+
+static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
+{
+ union irte *ptr, *irte;
+ int index;
+
+ for (index = 0; index < int_tab_len; index++) {
+ ptr = (union irte *)table->table;
+ irte = &ptr[index];
+
+ if (!irte->fields.valid)
+ continue;
+ seq_printf(m, "IRT[%04d] %08x\n", index, irte->val);
+ }
+}
+
+static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
+{
+ struct dev_table_entry *dev_table;
+ struct irq_remap_table *table;
+ struct amd_iommu *iommu;
+ unsigned long flags;
+ u16 int_tab_len;
+
+ table = pci_seg->irq_lookup_table[devid];
+ if (!table) {
+ seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x:%x\n",
+ pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
+ return;
+ }
+
+ iommu = pci_seg->rlookup_table[devid];
+ if (!iommu)
+ return;
+
+ dev_table = get_dev_table(iommu);
+ if (!dev_table) {
+ seq_puts(m, "Device table not found");
+ return;
+ }
+
+ int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
+ if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
+ seq_puts(m, "The device's DTE contains an invalid IRT length value.");
+ return;
+ }
+
+ seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
+ PCI_SLOT(devid), PCI_FUNC(devid));
+
+ raw_spin_lock_irqsave(&table->lock, flags);
+ if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
+ dump_128_irte(m, table, BIT(int_tab_len >> 1));
+ else
+ dump_32_irte(m, table, BIT(int_tab_len >> 1));
+ seq_puts(m, "\n");
+ raw_spin_unlock_irqrestore(&table->lock, flags);
+}
+
+static int iommu_irqtbl_show(struct seq_file *m, void *unused)
+{
+ struct amd_iommu_pci_seg *pci_seg;
+ u16 devid, seg;
+
+ if (!irq_remapping_enabled) {
+ seq_puts(m, "Interrupt remapping is disabled\n");
+ return 0;
+ }
+
+ if (sbdf < 0) {
+ seq_puts(m, "Enter a valid device ID to 'devid' file\n");
+ return 0;
+ }
+
+ seg = PCI_SBDF_TO_SEGID(sbdf);
+ devid = PCI_SBDF_TO_DEVID(sbdf);
+
+ for_each_pci_segment(pci_seg) {
+ if (pci_seg->id != seg)
+ continue;
+ dump_irte(m, devid, pci_seg);
+ break;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);
+
+void amd_iommu_debugfs_setup(void)
+{
+ struct amd_iommu *iommu;
char name[MAX_NAME_LEN + 1];
- mutex_lock(&amd_iommu_debugfs_lock);
- if (!amd_iommu_debugfs)
- amd_iommu_debugfs = debugfs_create_dir("amd",
- iommu_debugfs_dir);
- mutex_unlock(&amd_iommu_debugfs_lock);
+ amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);
+
+ for_each_iommu(iommu) {
+ iommu->dbg_mmio_offset = -1;
+ iommu->dbg_cap_offset = -1;
+
+ snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
+ iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+
+ debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
+ &iommu_mmio_fops);
+ debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
+ &iommu_capability_fops);
+ debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
+ &iommu_cmdbuf_fops);
+ }
- snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
- iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
+ debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
+ &devid_fops);
+ debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
+ &iommu_devtbl_fops);
+ debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
+ &iommu_irqtbl_fops);
}
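
The per-IOMMU files (mmio, capability, cmdbuf) live in one directory per instance, while devid, devtbl and irqtbl are global and keyed off the last device ID written. A hypothetical userspace sketch of the flow, assuming debugfs is mounted at /sys/kernel/debug and CONFIG_AMD_IOMMU_DEBUGFS=y:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f;

	/* Select a device for the devtbl/irqtbl dumps. "0000:01:00.0" is a
	 * placeholder; any seg:bus:dev.fn behind an AMD IOMMU works. */
	f = fopen("/sys/kernel/debug/iommu/amd/devid", "w");
	if (!f)
		return 1;
	fputs("0000:01:00.0", f);
	fclose(f);

	/* Read back that device's device-table entry. */
	f = fopen("/sys/kernel/debug/iommu/amd/devtbl", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
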
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 9c17dfa76703..7b5af6176de9 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,6 +152,8 @@ bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;
enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
+/* Host page table level */
+u8 amd_iommu_hpt_level;
/* Guest page table level */
int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -168,6 +170,9 @@ static int amd_iommu_target_ivhd_type;
u64 amd_iommu_efr;
u64 amd_iommu_efr2;
+/* Host (v1) page table is not supported */
+bool amd_iommu_hatdis;
+
/* SNP is enabled on the system? */
bool amd_iommu_snp_en;
EXPORT_SYMBOL(amd_iommu_snp_en);
@@ -1792,6 +1797,11 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h,
if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+ if (h->efr_attr & BIT(IOMMU_IVHD_ATTR_HATDIS_SHIFT)) {
+ pr_warn_once("Host Address Translation is not supported.\n");
+ amd_iommu_hatdis = true;
+ }
+
early_iommu_features_init(iommu, h);
break;
@@ -2112,7 +2122,15 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
return ret;
}
- iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
+ ret = iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
+ if (ret || amd_iommu_pgtable == PD_MODE_NONE) {
+ /*
+ * Remove sysfs if DMA translation is not supported by the
+ * IOMMU. Do not return an error, so that IRQ remapping can
+ * still be enabled in state_next(); DTE[V, TV] must eventually
+ * be set to 0.
+ */
+ iommu_device_sysfs_remove(&iommu->iommu);
+ }
return pci_enable_device(iommu->dev);
}
@@ -2573,7 +2591,7 @@ static void init_device_table_dma(struct amd_iommu_pci_seg *pci_seg)
u32 devid;
struct dev_table_entry *dev_table = pci_seg->dev_table;
- if (dev_table == NULL)
+ if (!dev_table || amd_iommu_pgtable == PD_MODE_NONE)
return;
for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
@@ -3033,6 +3051,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
int ret;
acpi_status status;
+ u8 efr_hats;
if (!amd_iommu_detected)
return -ENODEV;
@@ -3077,6 +3096,19 @@ static int __init early_amd_iommu_init(void)
FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
+ efr_hats = FIELD_GET(FEATURE_HATS, amd_iommu_efr);
+ if (efr_hats != 0x3) {
+ /*
+ * EFR[HATS] specifies the maximum host translation level
+ * supported, encoded relative to 4-level paging (HATS == 0
+ * means a 4-level maximum).
+ */
+ amd_iommu_hpt_level = efr_hats + PAGE_MODE_4_LEVEL;
+ } else {
+ pr_warn_once(FW_BUG "Disable host address translation due to invalid translation level (%#x).\n",
+ efr_hats);
+ amd_iommu_hatdis = true;
+ }
+
if (amd_iommu_pgtable == PD_MODE_V2) {
if (!amd_iommu_v2_pgtbl_supported()) {
pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
@@ -3084,6 +3116,17 @@ static int __init early_amd_iommu_init(void)
}
}
+ if (amd_iommu_hatdis) {
+ /*
+ * Host (v1) page table is not available. Attempt to use
+ * Guest (v2) page table.
+ */
+ if (amd_iommu_v2_pgtbl_supported())
+ amd_iommu_pgtable = PD_MODE_V2;
+ else
+ amd_iommu_pgtable = PD_MODE_NONE;
+ }
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
@@ -3376,7 +3419,6 @@ int amd_iommu_enable_faulting(unsigned int cpu)
*/
static int __init amd_iommu_init(void)
{
- struct amd_iommu *iommu;
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
@@ -3390,8 +3432,8 @@ static int __init amd_iommu_init(void)
}
#endif
- for_each_iommu(iommu)
- amd_iommu_debugfs_setup(iommu);
+ if (!ret)
+ amd_iommu_debugfs_setup();
return ret;
}
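
The EFR[HATS] decode above is a plain offset from 4-level paging. An illustrative helper (not in the patch), assuming PAGE_MODE_4_LEVEL == 4 from amd_iommu_types.h:

/* HATS = 0 -> 4-level, 1 -> 5-level, 2 -> 6-level host page table;
 * 3 is reserved and makes early_amd_iommu_init() set amd_iommu_hatdis. */
static u8 hats_to_hpt_level(u8 hats)
{
	return hats + PAGE_MODE_4_LEVEL;
}
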
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 4d308c071134..a91e71f981ef 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -125,7 +125,7 @@ static bool increase_address_space(struct amd_io_pgtable *pgtable,
goto out;
ret = false;
- if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
+ if (WARN_ON_ONCE(pgtable->mode == amd_iommu_hpt_level))
goto out;
*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
@@ -526,7 +526,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
- pgtable->mode > PAGE_MODE_6_LEVEL);
+ pgtable->mode > amd_iommu_hpt_level);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
iommu_put_pages_list(&freelist);
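
Both call sites now bound the v1 page-table mode by the EFR-derived amd_iommu_hpt_level rather than the architectural PAGE_MODE_6_LEVEL. As an illustration only (relies on PM_LEVEL_SHIFT(x) == 12 + 9 * x from amd_iommu_types.h), the cap translates to an aperture like this:

static u64 v1_max_iova(u8 hpt_level)
{
	/* Each level adds 9 bits of IOVA: 4-level -> 2^48 - 1,
	 * 5-level -> 2^57 - 1, 6-level covers the full 64-bit space. */
	if (hpt_level >= 6)
		return ~0ULL;
	return (1ULL << (12 + 9 * hpt_level)) - 1;
}
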
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 3117d99cf83d..eb348c63a8d0 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -25,6 +25,7 @@
#include <linux/notifier.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqchip/irq-msi-lib.h>
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
@@ -63,13 +64,6 @@ static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
/*
- * general struct to manage commands send to an IOMMU
- */
-struct iommu_cmd {
- u32 data[4];
-};
-
-/*
* AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
* to know which ones are already in use.
*/
@@ -634,8 +628,8 @@ static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
static void pdev_enable_caps(struct pci_dev *pdev)
{
- pdev_enable_cap_ats(pdev);
pdev_enable_cap_pasid(pdev);
+ pdev_enable_cap_ats(pdev);
pdev_enable_cap_pri(pdev);
}
@@ -2424,6 +2418,13 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev)
pci_max_pasids(to_pci_dev(dev)));
}
+ if (amd_iommu_pgtable == PD_MODE_NONE) {
+ pr_warn_once("%s: DMA translation not supported by iommu.\n",
+ __func__);
+ iommu_dev = ERR_PTR(-ENODEV);
+ goto out_err;
+ }
+
out_err:
iommu_completion_wait(iommu);
@@ -2511,6 +2512,9 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
case PD_MODE_V2:
fmt = AMD_IOMMU_V2;
break;
+ case PD_MODE_NONE:
+ WARN_ON_ONCE(1);
+ return -EPERM;
}
domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
@@ -2524,14 +2528,30 @@ static int pdom_setup_pgtable(struct protection_domain *domain,
static inline u64 dma_max_address(enum protection_domain_mode pgtable)
{
if (pgtable == PD_MODE_V1)
- return ~0ULL;
+ return PM_LEVEL_SIZE(amd_iommu_hpt_level);
- /* V2 with 4/5 level page table */
- return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+ /*
+ * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
+ * Translation" shows that the V2 table sign extends the top of the
+ * address space creating a reserved region in the middle of the
+ * translation, just like the CPU does. Further, Vasant says the docs are
+ * incomplete and this only applies to non-zero PASIDs. If the AMDv2
+ * page table is assigned to the 0 PASID then there is no sign extension
+ * check.
+ *
+ * Since the IOMMU must have a fixed geometry, and the core code does
+ * not understand sign extended addressing, we have to chop off the high
+ * bit to get consistent behavior with attachments of the domain to any
+ * PASID.
+ */
+ return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
}
static bool amd_iommu_hd_support(struct amd_iommu *iommu)
{
+ if (amd_iommu_hatdis)
+ return false;
+
return iommu && (iommu->features & FEATURE_HDSUP);
}
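
Worked numbers for the sign-extension chop in dma_max_address(), as a sketch assuming PM_LEVEL_SHIFT(x) == 12 + 9 * x:

static u64 v2_aperture_end(int gpt_level)
{
	/* Drop the top (sign) bit so only the lower, canonical half of the
	 * sign-extended space is exposed: 4-level -> (1ULL << 47) - 1,
	 * 5-level -> (1ULL << 56) - 1. */
	return (1ULL << ((12 + 9 * gpt_level) - 1)) - 1;
}
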
@@ -3804,13 +3824,70 @@ static const struct irq_domain_ops amd_ir_domain_ops = {
.deactivate = irq_remapping_deactivate,
};
-int amd_iommu_activate_guest_mode(void *data)
+static void __amd_iommu_update_ga(struct irte_ga *entry, int cpu,
+ bool ga_log_intr)
+{
+ if (cpu >= 0) {
+ entry->lo.fields_vapic.destination =
+ APICID_TO_IRTE_DEST_LO(cpu);
+ entry->hi.fields.destination =
+ APICID_TO_IRTE_DEST_HI(cpu);
+ entry->lo.fields_vapic.is_run = true;
+ entry->lo.fields_vapic.ga_log_intr = false;
+ } else {
+ entry->lo.fields_vapic.is_run = false;
+ entry->lo.fields_vapic.ga_log_intr = ga_log_intr;
+ }
+}
+
+/*
+ * Update the pCPU information for an IRTE that is configured to post IRQs to
+ * a vCPU, without issuing an IOMMU invalidation for the IRTE.
+ *
+ * If the vCPU is associated with a pCPU (@cpu >= 0), configure the Destination
+ * with the pCPU's APIC ID, set IsRun, and clear GALogIntr. If the vCPU isn't
+ * associated with a pCPU (@cpu < 0), clear IsRun and set/clear GALogIntr based
+ * on input from the caller (e.g. KVM only requests GALogIntr when the vCPU is
+ * blocking and requires a notification wake event). I.e. treat vCPUs that are
+ * associated with a pCPU as running. This API is intended to be used when a
+ * vCPU is scheduled in/out (or stops running for any reason), to do a fast
+ * update of IsRun, GALogIntr, and (conditionally) Destination.
+ *
+ * Per the IOMMU spec, the Destination, IsRun, and GATag fields are not cached
+ * and thus don't require an invalidation to ensure the IOMMU consumes fresh
+ * information.
+ */
+int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
+{
+ struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
+ struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
+
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry || !entry->lo.fields_vapic.guest_mode)
+ return 0;
+
+ if (!ir_data->iommu)
+ return -ENODEV;
+
+ __amd_iommu_update_ga(entry, cpu, ga_log_intr);
+
+ return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
+ ir_data->irq_2_irte.index, entry);
+}
+EXPORT_SYMBOL(amd_iommu_update_ga);
+
+int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
u64 valid;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry)
return 0;
valid = entry->lo.fields_vapic.valid;
@@ -3820,11 +3897,12 @@ int amd_iommu_activate_guest_mode(void *data)
entry->lo.fields_vapic.valid = valid;
entry->lo.fields_vapic.guest_mode = 1;
- entry->lo.fields_vapic.ga_log_intr = 1;
entry->hi.fields.ga_root_ptr = ir_data->ga_root_ptr;
entry->hi.fields.vector = ir_data->ga_vector;
entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
+ __amd_iommu_update_ga(entry, cpu, ga_log_intr);
+
return modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
ir_data->irq_2_irte.index, entry);
}
@@ -3837,8 +3915,10 @@ int amd_iommu_deactivate_guest_mode(void *data)
struct irq_cfg *cfg = ir_data->cfg;
u64 valid;
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || !entry->lo.fields_vapic.guest_mode)
+ if (WARN_ON_ONCE(!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)))
+ return -EINVAL;
+
+ if (!entry || !entry->lo.fields_vapic.guest_mode)
return 0;
valid = entry->lo.fields_remap.valid;
@@ -3860,11 +3940,10 @@ int amd_iommu_deactivate_guest_mode(void *data)
}
EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
-static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
+static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
int ret;
- struct amd_iommu_pi_data *pi_data = vcpu_info;
- struct vcpu_data *vcpu_pi_info = pi_data->vcpu_data;
+ struct amd_iommu_pi_data *pi_data = info;
struct amd_ir_data *ir_data = data->chip_data;
struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
struct iommu_dev_data *dev_data;
@@ -3885,25 +3964,20 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
return -EINVAL;
ir_data->cfg = irqd_cfg(data);
- pi_data->ir_data = ir_data;
- pi_data->prev_ga_tag = ir_data->cached_ga_tag;
- if (pi_data->is_guest_mode) {
- ir_data->ga_root_ptr = (pi_data->base >> 12);
- ir_data->ga_vector = vcpu_pi_info->vector;
+ if (pi_data) {
+ pi_data->ir_data = ir_data;
+
+ ir_data->ga_root_ptr = (pi_data->vapic_addr >> 12);
+ ir_data->ga_vector = pi_data->vector;
ir_data->ga_tag = pi_data->ga_tag;
- ret = amd_iommu_activate_guest_mode(ir_data);
- if (!ret)
- ir_data->cached_ga_tag = pi_data->ga_tag;
+ if (pi_data->is_guest_mode)
+ ret = amd_iommu_activate_guest_mode(ir_data, pi_data->cpu,
+ pi_data->ga_log_intr);
+ else
+ ret = amd_iommu_deactivate_guest_mode(ir_data);
} else {
ret = amd_iommu_deactivate_guest_mode(ir_data);
-
- /*
- * This communicates the ga_tag back to the caller
- * so that it can do all the necessary clean up.
- */
- if (!ret)
- ir_data->cached_ga_tag = 0;
}
return ret;
@@ -3970,54 +4044,30 @@ static struct irq_chip amd_ir_chip = {
static const struct msi_parent_ops amdvi_msi_parent_ops = {
.supported_flags = X86_VECTOR_MSI_FLAGS_SUPPORTED | MSI_FLAG_MULTI_PCI_MSI,
+ .bus_select_token = DOMAIN_BUS_AMDVI,
+ .bus_select_mask = MATCH_PCI_MSI,
.prefix = "IR-",
.init_dev_msi_info = msi_parent_init_dev_msi_info,
};
int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
{
- struct fwnode_handle *fn;
+ struct irq_domain_info info = {
+ .fwnode = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index),
+ .ops = &amd_ir_domain_ops,
+ .domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI,
+ .host_data = iommu,
+ .parent = arch_get_ir_parent_domain(),
+ };
- fn = irq_domain_alloc_named_id_fwnode("AMD-IR", iommu->index);
- if (!fn)
+ if (!info.fwnode)
return -ENOMEM;
- iommu->ir_domain = irq_domain_create_hierarchy(arch_get_ir_parent_domain(), 0, 0,
- fn, &amd_ir_domain_ops, iommu);
+
+ iommu->ir_domain = msi_create_parent_irq_domain(&info, &amdvi_msi_parent_ops);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
+ irq_domain_free_fwnode(info.fwnode);
return -ENOMEM;
}
-
- irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
- iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
- IRQ_DOMAIN_FLAG_ISOLATED_MSI;
- iommu->ir_domain->msi_parent_ops = &amdvi_msi_parent_ops;
-
return 0;
}
-
-int amd_iommu_update_ga(int cpu, bool is_run, void *data)
-{
- struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
- struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
-
- if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
- !entry || !entry->lo.fields_vapic.guest_mode)
- return 0;
-
- if (!ir_data->iommu)
- return -ENODEV;
-
- if (cpu >= 0) {
- entry->lo.fields_vapic.destination =
- APICID_TO_IRTE_DEST_LO(cpu);
- entry->hi.fields.destination =
- APICID_TO_IRTE_DEST_HI(cpu);
- }
- entry->lo.fields_vapic.is_run = is_run;
-
- return __modify_irte_ga(ir_data->iommu, ir_data->irq_2_irte.devid,
- ir_data->irq_2_irte.index, entry);
-}
-EXPORT_SYMBOL(amd_iommu_update_ga);
#endif
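
With cached_ga_tag and the vcpu_data plumbing gone, the GA path reduces to two entry points. A hypothetical caller-side sketch (not KVM's actual code) of how a hypervisor might drive them on scheduling events:

/* One-time switch into posted mode: full IRTE rewrite plus invalidation. */
static int posted_intr_start(void *ir_data, int pcpu)
{
	return amd_iommu_activate_guest_mode(ir_data, pcpu, false);
}

/* Fast path on sched-out: clear IsRun and request a GALog wake event only
 * while the vCPU blocks; no IOMMU invalidation is required. */
static int posted_intr_sched_out(void *ir_data, bool vcpu_blocking)
{
	return amd_iommu_update_ga(ir_data, -1, vcpu_blocking);
}

/* Fast path on sched-in: retarget the IRTE to the new pCPU and set IsRun. */
static int posted_intr_sched_in(void *ir_data, int pcpu)
{
	return amd_iommu_update_ga(ir_data, pcpu, false);
}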